file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-12.1k) | suffix (large_string, lengths 0-12k) | middle (large_string, lengths 0-7.51k) | fim_type (large_string, 4 class values)
---|---|---|---|---|
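The rows below are flattened fill-in-the-middle (FIM) samples: going by the column names, prefix and suffix hold the code surrounding a masked span, middle holds the masked span itself, and fim_type records the syntactic class of that span (the classes visible in this preview are conditional_block, identifier_name, identifier_body and random_line_split). As a minimal sketch of how such a row could be consumed, the Go snippet below reassembles the original fragment and renders a prefix-suffix-middle style training prompt; the fimRow struct, the example row values and the <fim_*> sentinel strings are illustrative assumptions rather than anything defined by this dataset.

package main

import (
	"fmt"
	"strings"
)

// fimRow mirrors the columns described above.
type fimRow struct {
	FileName string // source file the sample was cut from
	Prefix   string // text before the masked span
	Middle   string // the masked span itself
	Suffix   string // text after the masked span
	FimType  string // syntactic class of the span
}

// reconstruct puts the masked span back between prefix and suffix,
// recovering the original source fragment (a quick sanity check on a row).
func reconstruct(r fimRow) string {
	return r.Prefix + r.Middle + r.Suffix
}

// toPSMPrompt renders the row in a prefix-suffix-middle layout. The
// sentinel strings are placeholders; a real FIM setup would use the
// tokenizer's own special tokens.
func toPSMPrompt(r fimRow) string {
	var b strings.Builder
	b.WriteString("<fim_prefix>")
	b.WriteString(r.Prefix)
	b.WriteString("<fim_suffix>")
	b.WriteString(r.Suffix)
	b.WriteString("<fim_middle>")
	b.WriteString(r.Middle)
	return b.String()
}

func main() {
	// A toy row in the spirit of the azure_logcollector.go samples below.
	row := fimRow{
		FileName: "azure_logcollector.go",
		Prefix:   "nodeOSType := azure.LinuxOS\nif isWindows {\n\tnodeOSType = ",
		Middle:   "azure.WindowsOS",
		Suffix:   "\n}",
		FimType:  "identifier_name",
	}
	fmt.Println(reconstruct(row))
	fmt.Println(toPSMPrompt(row))
}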
azure_logcollector.go | err
}
// Machine pool can be an AzureManagedMachinePool for AKS clusters.
_, err = getAzureManagedMachinePool(ctx, managementClusterClient, mp)
if err != nil |
} else {
isWindows = isAzureMachinePoolWindows(am)
}
cluster, err := util.GetClusterFromMetadata(ctx, managementClusterClient, mp.ObjectMeta)
if err != nil {
return err
}
for i, instance := range mp.Spec.ProviderIDList {
if mp.Status.NodeRefs != nil && len(mp.Status.NodeRefs) >= (i+1) {
hostname := mp.Status.NodeRefs[i].Name
if err := collectLogsFromNode(cluster, hostname, isWindows, filepath.Join(outputPath, hostname)); err != nil {
errs = append(errs, err)
}
if err := collectVMSSBootLog(ctx, instance, filepath.Join(outputPath, hostname)); err != nil {
errs = append(errs, errors.Wrap(err, "Unable to collect VMSS Boot Diagnostic logs"))
}
} else {
Logf("MachinePool instance %s does not have a corresponding NodeRef", instance)
Logf("Skipping log collection for MachinePool instance %s", instance)
}
}
return kinderrors.NewAggregate(errs)
}
// CollectInfrastructureLogs collects logs from the infrastructure.
// This is currently a no-op implementation to satisfy the LogCollector interface.
func (k AzureLogCollector) CollectInfrastructureLogs(ctx context.Context, managementClusterClient client.Client, c *clusterv1.Cluster, outputPath string) error {
return nil
}
// collectLogsFromNode collects logs from various sources by ssh'ing into the node
func collectLogsFromNode(cluster *clusterv1.Cluster, hostname string, isWindows bool, outputPath string) error {
nodeOSType := azure.LinuxOS
if isWindows {
nodeOSType = azure.WindowsOS
}
Logf("Collecting logs for %s node %s in cluster %s in namespace %s\n", nodeOSType, hostname, cluster.Name, cluster.Namespace)
controlPlaneEndpoint := cluster.Spec.ControlPlaneEndpoint.Host
execToPathFn := func(outputFileName, command string, args ...string) func() error {
return func() error {
return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error {
f, err := fileOnHost(filepath.Join(outputPath, outputFileName))
if err != nil {
return err
}
defer f.Close()
return execOnHost(controlPlaneEndpoint, hostname, sshPort, collectLogTimeout, f, command, args...)
})
}
}
if isWindows {
// if we initiate too many SSH connections they get dropped (default is 10), so split it up
var errors []error
errors = append(errors, kinderrors.AggregateConcurrent(windowsInfo(execToPathFn)))
errors = append(errors, kinderrors.AggregateConcurrent(windowsK8sLogs(execToPathFn)))
errors = append(errors, kinderrors.AggregateConcurrent(windowsNetworkLogs(execToPathFn)))
errors = append(errors, kinderrors.AggregateConcurrent(windowsCrashDumpLogs(execToPathFn)))
errors = append(errors, sftpCopyFile(controlPlaneEndpoint, hostname, sshPort, collectLogTimeout, "/c:/crashdumps.tar", filepath.Join(outputPath, "crashdumps.tar")))
return kinderrors.NewAggregate(errors)
}
return kinderrors.AggregateConcurrent(linuxLogs(execToPathFn))
}
func getHostname(m *clusterv1.Machine, isWindows bool) string {
hostname := m.Spec.InfrastructureRef.Name
if isWindows {
// Windows host name ends up being different from the infra machine name
// due to Windows name limitations in Azure, so use the IP address instead.
if len(m.Status.Addresses) > 0 {
hostname = m.Status.Addresses[0].Address
} else {
Logf("Unable to collect logs as node doesn't have addresses")
}
}
return hostname
}
func getAzureCluster(ctx context.Context, managementClusterClient client.Client, namespace, name string) (*infrav1.AzureCluster, error) {
key := client.ObjectKey{
Namespace: namespace,
Name: name,
}
azCluster := &infrav1.AzureCluster{}
err := managementClusterClient.Get(ctx, key, azCluster)
return azCluster, err
}
func getAzureManagedControlPlane(ctx context.Context, managementClusterClient client.Client, namespace, name string) (*infrav1.AzureManagedControlPlane, error) {
key := client.ObjectKey{
Namespace: namespace,
Name: name,
}
azManagedControlPlane := &infrav1.AzureManagedControlPlane{}
err := managementClusterClient.Get(ctx, key, azManagedControlPlane)
return azManagedControlPlane, err
}
func getAzureMachine(ctx context.Context, managementClusterClient client.Client, m *clusterv1.Machine) (*infrav1.AzureMachine, error) {
key := client.ObjectKey{
Namespace: m.Spec.InfrastructureRef.Namespace,
Name: m.Spec.InfrastructureRef.Name,
}
azMachine := &infrav1.AzureMachine{}
err := managementClusterClient.Get(ctx, key, azMachine)
return azMachine, err
}
func getAzureMachinePool(ctx context.Context, managementClusterClient client.Client, mp *expv1.MachinePool) (*infrav1exp.AzureMachinePool, error) {
key := client.ObjectKey{
Namespace: mp.Spec.Template.Spec.InfrastructureRef.Namespace,
Name: mp.Spec.Template.Spec.InfrastructureRef.Name,
}
azMachinePool := &infrav1exp.AzureMachinePool{}
err := managementClusterClient.Get(ctx, key, azMachinePool)
return azMachinePool, err
}
func getAzureManagedMachinePool(ctx context.Context, managementClusterClient client.Client, mp *expv1.MachinePool) (*infrav1.AzureManagedMachinePool, error) {
key := client.ObjectKey{
Namespace: mp.Spec.Template.Spec.InfrastructureRef.Namespace,
Name: mp.Spec.Template.Spec.InfrastructureRef.Name,
}
azManagedMachinePool := &infrav1.AzureManagedMachinePool{}
err := managementClusterClient.Get(ctx, key, azManagedMachinePool)
return azManagedMachinePool, err
}
func linuxLogs(execToPathFn func(outputFileName string, command string, args ...string) func() error) []func() error {
return []func() error{
execToPathFn(
"journal.log",
"sudo", "journalctl", "--no-pager", "--output=short-precise",
),
execToPathFn(
"kern.log",
"sudo", "journalctl", "--no-pager", "--output=short-precise", "-k",
),
execToPathFn(
"kubelet-version.txt",
"PATH=/opt/bin:${PATH}", "kubelet", "--version",
),
execToPathFn(
"kubelet.log",
"sudo", "journalctl", "--no-pager", "--output=short-precise", "-u", "kubelet.service",
),
execToPathFn(
"containerd.log",
"sudo", "journalctl", "--no-pager", "--output=short-precise", "-u", "containerd.service",
),
execToPathFn(
"ignition.log",
"sudo", "journalctl", "--no-pager", "--output=short-precise", "-at", "ignition",
),
execToPathFn(
"cloud-init.log",
"cat", "/var/log/cloud-init.log",
),
execToPathFn(
"cloud-init-output.log",
"cat", "/var/log/cloud-init-output.log",
),
execToPathFn(
"sentinel-file-dir.txt",
"ls", "/run/cluster-api/",
),
execToPathFn(
"cni.log",
"cat", "/var/log/calico/cni/cni.log",
),
}
}
func windowsK8sLogs(execToPathFn func(outputFileName string, command string, args ...string) func() error) []func() error {
return []func() error{
execToPathFn(
"hyperv-operation.log",
"Get-WinEvent", "-LogName Microsoft-Windows-Hyper-V-Compute-Operational | Select-Object -Property TimeCreated, Id, LevelDisplayName, Message | Sort-Object TimeCreated | Format-Table -Wrap -Autosize",
),
execToPathFn(
"containerd-containers.log",
"ctr.exe", "-n k8s.io containers list",
),
execToPathFn(
"containerd-tasks.log",
"ctr.exe", "-n k8s.io tasks list",
),
execToPathFn(
"containers-hcs.log",
"hcsdiag", "list",
),
execToPathFn(
"kubelet.log",
`Get-ChildItem "C:\\var\\log\\kubelet\\" | ForEach-Object { if ($_ -match 'log.INFO|err.*.log') { write-output "$_";cat "c:\\var\\log\\kubelet\\$_" } }`,
),
execToPathFn | {
return err
} | conditional_block |
azure_logcollector.go | err
}
// Machine pool can be an AzureManagedMachinePool for AKS clusters.
_, err = getAzureManagedMachinePool(ctx, managementClusterClient, mp)
if err != nil {
return err
}
} else {
isWindows = isAzureMachinePoolWindows(am)
}
cluster, err := util.GetClusterFromMetadata(ctx, managementClusterClient, mp.ObjectMeta)
if err != nil {
return err
}
for i, instance := range mp.Spec.ProviderIDList {
if mp.Status.NodeRefs != nil && len(mp.Status.NodeRefs) >= (i+1) {
hostname := mp.Status.NodeRefs[i].Name
if err := collectLogsFromNode(cluster, hostname, isWindows, filepath.Join(outputPath, hostname)); err != nil {
errs = append(errs, err)
}
if err := collectVMSSBootLog(ctx, instance, filepath.Join(outputPath, hostname)); err != nil {
errs = append(errs, errors.Wrap(err, "Unable to collect VMSS Boot Diagnostic logs"))
}
} else {
Logf("MachinePool instance %s does not have a corresponding NodeRef", instance)
Logf("Skipping log collection for MachinePool instance %s", instance)
}
}
return kinderrors.NewAggregate(errs)
}
// CollectInfrastructureLogs collects logs from the infrastructure.
// This is currently a no-op implementation to satisfy the LogCollector interface.
func (k AzureLogCollector) CollectInfrastructureLogs(ctx context.Context, managementClusterClient client.Client, c *clusterv1.Cluster, outputPath string) error {
return nil
}
// collectLogsFromNode collects logs from various sources by ssh'ing into the node
func | (cluster *clusterv1.Cluster, hostname string, isWindows bool, outputPath string) error {
nodeOSType := azure.LinuxOS
if isWindows {
nodeOSType = azure.WindowsOS
}
Logf("Collecting logs for %s node %s in cluster %s in namespace %s\n", nodeOSType, hostname, cluster.Name, cluster.Namespace)
controlPlaneEndpoint := cluster.Spec.ControlPlaneEndpoint.Host
execToPathFn := func(outputFileName, command string, args ...string) func() error {
return func() error {
return retryWithTimeout(collectLogInterval, collectLogTimeout, func() error {
f, err := fileOnHost(filepath.Join(outputPath, outputFileName))
if err != nil {
return err
}
defer f.Close()
return execOnHost(controlPlaneEndpoint, hostname, sshPort, collectLogTimeout, f, command, args...)
})
}
}
if isWindows {
// if we initiate too many SSH connections they get dropped (default is 10), so split it up
var errors []error
errors = append(errors, kinderrors.AggregateConcurrent(windowsInfo(execToPathFn)))
errors = append(errors, kinderrors.AggregateConcurrent(windowsK8sLogs(execToPathFn)))
errors = append(errors, kinderrors.AggregateConcurrent(windowsNetworkLogs(execToPathFn)))
errors = append(errors, kinderrors.AggregateConcurrent(windowsCrashDumpLogs(execToPathFn)))
errors = append(errors, sftpCopyFile(controlPlaneEndpoint, hostname, sshPort, collectLogTimeout, "/c:/crashdumps.tar", filepath.Join(outputPath, "crashdumps.tar")))
return kinderrors.NewAggregate(errors)
}
return kinderrors.AggregateConcurrent(linuxLogs(execToPathFn))
}
func getHostname(m *clusterv1.Machine, isWindows bool) string {
hostname := m.Spec.InfrastructureRef.Name
if isWindows {
// Windows host name ends up being different from the infra machine name
// due to Windows name limitations in Azure, so use the IP address instead.
if len(m.Status.Addresses) > 0 {
hostname = m.Status.Addresses[0].Address
} else {
Logf("Unable to collect logs as node doesn't have addresses")
}
}
return hostname
}
func getAzureCluster(ctx context.Context, managementClusterClient client.Client, namespace, name string) (*infrav1.AzureCluster, error) {
key := client.ObjectKey{
Namespace: namespace,
Name: name,
}
azCluster := &infrav1.AzureCluster{}
err := managementClusterClient.Get(ctx, key, azCluster)
return azCluster, err
}
func getAzureManagedControlPlane(ctx context.Context, managementClusterClient client.Client, namespace, name string) (*infrav1.AzureManagedControlPlane, error) {
key := client.ObjectKey{
Namespace: namespace,
Name: name,
}
azManagedControlPlane := &infrav1.AzureManagedControlPlane{}
err := managementClusterClient.Get(ctx, key, azManagedControlPlane)
return azManagedControlPlane, err
}
func getAzureMachine(ctx context.Context, managementClusterClient client.Client, m *clusterv1.Machine) (*infrav1.AzureMachine, error) {
key := client.ObjectKey{
Namespace: m.Spec.InfrastructureRef.Namespace,
Name: m.Spec.InfrastructureRef.Name,
}
azMachine := &infrav1.AzureMachine{}
err := managementClusterClient.Get(ctx, key, azMachine)
return azMachine, err
}
func getAzureMachinePool(ctx context.Context, managementClusterClient client.Client, mp *expv1.MachinePool) (*infrav1exp.AzureMachinePool, error) {
key := client.ObjectKey{
Namespace: mp.Spec.Template.Spec.InfrastructureRef.Namespace,
Name: mp.Spec.Template.Spec.InfrastructureRef.Name,
}
azMachinePool := &infrav1exp.AzureMachinePool{}
err := managementClusterClient.Get(ctx, key, azMachinePool)
return azMachinePool, err
}
func getAzureManagedMachinePool(ctx context.Context, managementClusterClient client.Client, mp *expv1.MachinePool) (*infrav1.AzureManagedMachinePool, error) {
key := client.ObjectKey{
Namespace: mp.Spec.Template.Spec.InfrastructureRef.Namespace,
Name: mp.Spec.Template.Spec.InfrastructureRef.Name,
}
azManagedMachinePool := &infrav1.AzureManagedMachinePool{}
err := managementClusterClient.Get(ctx, key, azManagedMachinePool)
return azManagedMachinePool, err
}
func linuxLogs(execToPathFn func(outputFileName string, command string, args ...string) func() error) []func() error {
return []func() error{
execToPathFn(
"journal.log",
"sudo", "journalctl", "--no-pager", "--output=short-precise",
),
execToPathFn(
"kern.log",
"sudo", "journalctl", "--no-pager", "--output=short-precise", "-k",
),
execToPathFn(
"kubelet-version.txt",
"PATH=/opt/bin:${PATH}", "kubelet", "--version",
),
execToPathFn(
"kubelet.log",
"sudo", "journalctl", "--no-pager", "--output=short-precise", "-u", "kubelet.service",
),
execToPathFn(
"containerd.log",
"sudo", "journalctl", "--no-pager", "--output=short-precise", "-u", "containerd.service",
),
execToPathFn(
"ignition.log",
"sudo", "journalctl", "--no-pager", "--output=short-precise", "-at", "ignition",
),
execToPathFn(
"cloud-init.log",
"cat", "/var/log/cloud-init.log",
),
execToPathFn(
"cloud-init-output.log",
"cat", "/var/log/cloud-init-output.log",
),
execToPathFn(
"sentinel-file-dir.txt",
"ls", "/run/cluster-api/",
),
execToPathFn(
"cni.log",
"cat", "/var/log/calico/cni/cni.log",
),
}
}
func windowsK8sLogs(execToPathFn func(outputFileName string, command string, args ...string) func() error) []func() error {
return []func() error{
execToPathFn(
"hyperv-operation.log",
"Get-WinEvent", "-LogName Microsoft-Windows-Hyper-V-Compute-Operational | Select-Object -Property TimeCreated, Id, LevelDisplayName, Message | Sort-Object TimeCreated | Format-Table -Wrap -Autosize",
),
execToPathFn(
"containerd-containers.log",
"ctr.exe", "-n k8s.io containers list",
),
execToPathFn(
"containerd-tasks.log",
"ctr.exe", "-n k8s.io tasks list",
),
execToPathFn(
"containers-hcs.log",
"hcsdiag", "list",
),
execToPathFn(
"kubelet.log",
`Get-ChildItem "C:\\var\\log\\kubelet\\" | ForEach-Object { if ($_ -match 'log.INFO|err.*.log') { write-output "$_";cat "c:\\var\\log\\kubelet\\$_" } }`,
),
execToPathFn(
| collectLogsFromNode | identifier_name |
azure_logcollector.go | ",
),
}
}
func windowsK8sLogs(execToPathFn func(outputFileName string, command string, args ...string) func() error) []func() error {
return []func() error{
execToPathFn(
"hyperv-operation.log",
"Get-WinEvent", "-LogName Microsoft-Windows-Hyper-V-Compute-Operational | Select-Object -Property TimeCreated, Id, LevelDisplayName, Message | Sort-Object TimeCreated | Format-Table -Wrap -Autosize",
),
execToPathFn(
"containerd-containers.log",
"ctr.exe", "-n k8s.io containers list",
),
execToPathFn(
"containerd-tasks.log",
"ctr.exe", "-n k8s.io tasks list",
),
execToPathFn(
"containers-hcs.log",
"hcsdiag", "list",
),
execToPathFn(
"kubelet.log",
`Get-ChildItem "C:\\var\\log\\kubelet\\" | ForEach-Object { if ($_ -match 'log.INFO|err.*.log') { write-output "$_";cat "c:\\var\\log\\kubelet\\$_" } }`,
),
execToPathFn(
"cni.log",
`Get-Content "C:\\cni.log"`,
),
}
}
func windowsInfo(execToPathFn func(outputFileName string, command string, args ...string) func() error) []func() error {
return []func() error{
execToPathFn(
"reboots.log",
"Get-WinEvent", `-ErrorAction Ignore -FilterHashtable @{logname = 'System'; id = 1074, 1076, 2004, 6005, 6006, 6008 } | Select-Object -Property TimeCreated, Id, LevelDisplayName, Message | Format-Table -Wrap -Autosize`,
),
execToPathFn(
"scm.log",
"Get-WinEvent", `-FilterHashtable @{logname = 'System'; ProviderName = 'Service Control Manager' } | Select-Object -Property TimeCreated, Id, LevelDisplayName, Message | Format-Table -Wrap -Autosize`,
),
execToPathFn(
"pagefile.log",
"Get-CimInstance", "win32_pagefileusage | Format-List *",
),
execToPathFn(
"cloudbase-init-unattend.log",
"get-content 'C:\\Program Files\\Cloudbase Solutions\\Cloudbase-Init\\log\\cloudbase-init-unattend.log'",
),
execToPathFn(
"cloudbase-init.log",
"get-content 'C:\\Program Files\\Cloudbase Solutions\\Cloudbase-Init\\log\\cloudbase-init.log'",
),
execToPathFn(
"services.log",
"get-service",
),
}
}
func windowsNetworkLogs(execToPathFn func(outputFileName string, command string, args ...string) func() error) []func() error {
return []func() error{
execToPathFn(
"network.log",
"Get-HnsNetwork | Select Name, Type, Id, AddressPrefix | Format-Table -Wrap -Autosize",
),
execToPathFn(
"network-detailed.log",
"Get-hnsnetwork | Convertto-json -Depth 20",
),
execToPathFn(
"network-individual-detailed.log",
"Get-hnsnetwork | % { Get-HnsNetwork -Id $_.ID -Detailed } | Convertto-json -Depth 20",
),
execToPathFn(
"hnsendpoints.log",
"Get-HnsEndpoint | Select IpAddress, MacAddress, IsRemoteEndpoint, State",
),
execToPathFn(
"hnsendpolicy-detailed.log",
"Get-hnspolicylist | Convertto-json -Depth 20",
),
execToPathFn(
"ipconfig.log",
"ipconfig /allcompartments /all",
),
execToPathFn(
"ips.log",
"Get-NetIPAddress -IncludeAllCompartments",
),
execToPathFn(
"interfaces.log",
"Get-NetIPInterface -IncludeAllCompartments",
),
execToPathFn(
"hnsdiag.txt",
"hnsdiag list all -d",
),
}
}
func windowsCrashDumpLogs(execToPathFn func(outputFileName string, command string, args ...string) func() error) []func() error {
return []func() error{
execToPathFn(
"dir-localdumps.log",
// note: the powershell 'ls' alias will not have any output if the target directory is empty.
// we're logging the contents of the c:\localdumps directory because the command that invokes tar.exe below is
// not providing output when run in powershell over ssh for some reason.
"ls 'c:\\localdumps' -Recurse",
),
execToPathFn(
// capture any crashdump files created by windows into a .tar to be collected via sftp
"tar-crashdumps.log",
"$p = 'c:\\localdumps' ; if (Test-Path $p) { tar.exe -cvzf c:\\crashdumps.tar $p *>&1 | %{ Write-Output \"$_\"} } else { Write-Host \"No crash dumps found at $p\" }",
),
}
}
// collectVMBootLog collects boot logs of the vm by using azure boot diagnostics.
func collectVMBootLog(ctx context.Context, am *infrav1.AzureMachine, outputPath string) error {
if am == nil {
return errors.New("AzureMachine is nil")
}
Logf("Collecting boot logs for AzureMachine %s\n", am.GetName())
if am.Spec.ProviderID == nil {
return errors.New("AzureMachine provider ID is nil")
}
resource, err := azureutil.ParseResourceID(*am.Spec.ProviderID)
if err != nil {
return errors.Wrap(err, "failed to parse resource id")
}
settings, err := auth.GetSettingsFromEnvironment()
if err != nil {
return errors.Wrap(err, "failed to get settings from environment")
}
vmClient := compute.NewVirtualMachinesClient(settings.GetSubscriptionID())
vmClient.Authorizer, err = azureutil.GetAuthorizer(settings)
if err != nil {
return errors.Wrap(err, "failed to get authorizer")
}
bootDiagnostics, err := vmClient.RetrieveBootDiagnosticsData(ctx, resource.ResourceGroupName, resource.Name, nil)
if err != nil {
return errors.Wrap(err, "failed to get boot diagnostics data")
}
return writeBootLog(bootDiagnostics, outputPath)
}
// collectVMSSBootLog collects boot logs of the scale set by using azure boot diagnostics.
func collectVMSSBootLog(ctx context.Context, providerID string, outputPath string) error {
resourceID := strings.TrimPrefix(providerID, azureutil.ProviderIDPrefix)
v := strings.Split(resourceID, "/")
instanceID := v[len(v)-1]
resourceID = strings.TrimSuffix(resourceID, "/virtualMachines/"+instanceID)
resource, err := azureutil.ParseResourceID(resourceID)
if err != nil {
return errors.Wrap(err, "failed to parse resource id")
}
Logf("Collecting boot logs for VMSS instance %s of scale set %s\n", instanceID, resource.Name)
settings, err := auth.GetSettingsFromEnvironment()
if err != nil {
return errors.Wrap(err, "failed to get settings from environment")
}
vmssClient := compute.NewVirtualMachineScaleSetVMsClient(settings.GetSubscriptionID())
vmssClient.Authorizer, err = azureutil.GetAuthorizer(settings)
if err != nil {
return errors.Wrap(err, "failed to get authorizer")
}
bootDiagnostics, err := vmssClient.RetrieveBootDiagnosticsData(ctx, resource.ResourceGroupName, resource.Name, instanceID, nil)
if err != nil {
return errors.Wrap(err, "failed to get boot diagnostics data")
}
return writeBootLog(bootDiagnostics, outputPath)
}
func writeBootLog(bootDiagnostics compute.RetrieveBootDiagnosticsDataResult, outputPath string) error | {
var err error
req, err := http.NewRequestWithContext(context.TODO(), http.MethodGet, *bootDiagnostics.SerialConsoleLogBlobURI, http.NoBody)
if err != nil {
return errors.Wrap(err, "failed to create HTTP request")
}
resp, err := http.DefaultClient.Do(req)
if err != nil || resp.StatusCode != 200 {
return errors.Wrap(err, "failed to get logs from serial console uri")
}
defer resp.Body.Close()
content, err := io.ReadAll(resp.Body)
if err != nil {
return errors.Wrap(err, "failed to read response body")
}
if err := os.WriteFile(filepath.Join(outputPath, "boot.log"), content, 0o600); err != nil {
return errors.Wrap(err, "failed to write response to file")
} | identifier_body |
|
drkey.go | error {
keys, err := getKeys(ctx, conn, steps, req.SrcHost, req.TimeStamp)
if err != nil {
return err
}
// MAC and set authenticators inside request
payload := make([]byte, minSizeBaseReq(req))
serializeBaseRequest(payload, req)
req.Authenticators, err = computeAuthenticators(payload, keys)
return err
}
func createAuthsForE2EReservationSetup(ctx context.Context, conn DRKeyGetter,
req *E2EReservationSetup) error {
keys, err := getKeys(ctx, conn, req.Steps, req.SrcHost, req.TimeStamp)
if err != nil {
return err
}
payload := make([]byte, minSizeE2ESetupReq(req))
serializeE2EReservationSetup(payload, req)
req.Authenticators, err = computeAuthenticators(payload, keys)
return err
}
func validateResponseAuthenticators(ctx context.Context, conn DRKeyGetter,
res *E2EResponse, steps base.PathSteps, srcHost net.IP,
reqTimestamp time.Time) error {
if err := checkValidAuthenticatorsAndPath(res, steps); err != nil {
return err
}
if err := checkEqualLength(res.Authenticators, steps); err != nil {
return err
}
payloads, err := serializeResponse(res, steps, reqTimestamp)
if err != nil {
return err
}
return validateBasic(ctx, conn, payloads, res.Authenticators,
steps, srcHost, reqTimestamp)
}
func validateResponseErrorAuthenticators(ctx context.Context, conn DRKeyGetter,
res *E2EResponseError, steps base.PathSteps, srcHost net.IP,
reqTimestamp time.Time) error {
if err := checkValidAuthenticatorsAndPath(res, steps); err != nil {
return err
}
if err := checkEqualLength(res.Authenticators, steps); err != nil {
return err
}
// because a failure can originate at any on-path-AS, skip ASes before its origin:
originalAuthenticators := res.Authenticators
originalSteps := steps
defer func() {
res.Authenticators = originalAuthenticators
steps = originalSteps
}()
res.Authenticators = res.Authenticators[:res.FailedAS+1]
steps = steps[:res.FailedAS+1]
payloads := serializeResponseError(res, reqTimestamp)
return validateBasic(ctx, conn, payloads, res.Authenticators,
steps, srcHost, reqTimestamp)
}
func validateSetupErrorAuthenticators(ctx context.Context, conn DRKeyGetter,
res *E2ESetupError, steps base.PathSteps, srcHost net.IP,
reqTimestamp time.Time) error {
if err := checkValidAuthenticatorsAndPath(res, steps); err != nil {
return err
}
if err := checkEqualLength(res.Authenticators, steps); err != nil {
return err
}
// because a failure can originate at any on-path AS, skip ASes before its origin:
originalAuthenticators := res.Authenticators
originalSteps := steps.Copy()
defer func() {
res.Authenticators = originalAuthenticators
steps = originalSteps
}()
res.Authenticators = res.Authenticators[:res.FailedAS+1]
steps = steps[:res.FailedAS+1]
payloads := serializeSetupError(res, reqTimestamp)
return validateBasic(ctx, conn, payloads, res.Authenticators,
steps, srcHost, reqTimestamp)
}
func getKeys(ctx context.Context, conn DRKeyGetter, steps []base.PathStep,
srcHost net.IP, valTime time.Time) ([]drkey.Key, error) {
if len(steps) < 2 {
return nil, serrors.New("wrong path in request")
}
return getKeysWithLocalIA(ctx, conn, steps[1:], steps[0].IA, srcHost, valTime)
}
func getKeysWithLocalIA(ctx context.Context, conn DRKeyGetter, steps []base.PathStep,
localIA addr.IA, host net.IP, valtime time.Time) ([]drkey.Key, error) {
keys := make([]drkey.Key, len(steps))
for i, step := range steps {
key, err := conn.DRKeyGetASHostKey(ctx,
drkey.ASHostMeta{
Lvl2Meta: drkey.Lvl2Meta{
ProtoId: drkey.COLIBRI,
Validity: valtime,
SrcIA: step.IA,
DstIA: localIA,
},
DstHost: host.String(),
})
if err != nil {
return nil, err
}
keys[i] = key.Key
}
return keys, nil
}
func minSizeBaseReq(req *BaseRequest) int {
// fail to compile if these fields are not there
_ = req.Id
_ = req.Index
_ = req.TimeStamp
_ = req.SrcHost
_ = req.DstHost
return (6 + reservation.IDSuffixE2ELen) + 1 + 4 + // ID + index + time_stamp
16 + 16 // srcHost + dstHost
}
func minSizeE2ESetupReq(req *E2EReservationSetup) int {
// fail to compile if these fields are not there
_ = req.BaseRequest
_ = req.RequestedBW
_ = req.Segments
// BaseRequest + BW + Segment reservation IDs + Steps
return minSizeBaseReq(&req.BaseRequest) +
1 + len(req.Segments)*reservation.IDSegLen + req.Steps.Size()
}
func serializeBaseRequest(buff []byte, req *BaseRequest) {
minSize := minSizeBaseReq(req)
assert(len(buff) >= minSize, "buffer too short (actual %d < minimum %d)",
len(buff), minSize)
offset := req.Id.Len()
// ID, index and timestamp:
req.Id.Read(buff[:offset]) // ignore errors (length was already checked)
buff[offset] = byte(req.Index)
offset++
binary.BigEndian.PutUint32(buff[offset:], util.TimeToSecs(req.TimeStamp))
offset += 4
// src and dst hosts:
copy(buff[offset:], req.SrcHost.To16())
offset += 16
copy(buff[offset:], req.DstHost.To16())
}
func | (buff []byte, req *E2EReservationSetup) {
minSize := minSizeE2ESetupReq(req)
assert(len(buff) >= minSize, "buffer too short (actual %d < minimum %d)",
len(buff), minSize)
offset := minSizeBaseReq(&req.BaseRequest)
serializeBaseRequest(buff[:offset], &req.BaseRequest)
// steps:
req.Steps.Serialize(buff[offset:])
offset += req.Steps.Size()
// BW and segments:
buff[offset] = byte(req.RequestedBW)
offset++
for _, id := range req.Segments {
id.Read(buff[offset:]) // ignore errors (length was already checked)
offset += reservation.IDSegLen
}
}
// serializeResponse returns the serialized versions of the response, one per AS in the path.
func serializeResponse(res *E2EResponse, steps base.PathSteps, timestamp time.Time) (
[][]byte, error) {
colPath, ok := res.ColibriPath.Dataplane().(snetpath.Colibri)
if !ok {
return nil, serrors.New("unsupported non colibri path type",
"path_type", common.TypeOf(res.ColibriPath.Dataplane()))
}
colibriPath, err := colPath.ToColibriPath()
if err != nil {
return nil, serrors.WrapStr("received invalid colibri path", err)
}
timestampBuff := make([]byte, 4)
binary.BigEndian.PutUint32(timestampBuff, util.TimeToSecs(timestamp))
allHfs := colibriPath.HopFields
payloads := make([][]byte, len(allHfs))
for i := range steps {
colibriPath.InfoField.HFCount = uint8(len(allHfs) - i)
colibriPath.HopFields = allHfs[i:]
payloads[i] = make([]byte, 1+4+colibriPath.Len()) // marker + timestamp + path
payloads[i][0] = 0 // success marker
copy(payloads[i][1:5], timestampBuff)
// TODO(juagargi) why are we serializing the transport path???
if err := colibriPath.SerializeTo(payloads[i][5:]); err != nil {
return nil, err
}
}
return payloads, nil
}
// serializeResponseError serializes the response error and returns one payload per AS in the path.
func serializeResponseError(res *E2EResponseError, timestamp time.Time) [][]byte {
message := ([]byte(res.Message))
// failure marker + timestamp + failedAS (as uint8) + message
payload := make([]byte, 1+4+1+len(message))
payload[0] = 1 // failure marker
binary.BigEndian.PutUint32(payload[1:5], util.TimeToSecs(timestamp))
payload[5] = uint8(res.FailedAS)
copy(payload[6:], message)
payloads := make([][]byte, len(res.Authenticators))
for i := range payloads {
payloads[i] | serializeE2EReservationSetup | identifier_name |
drkey.go | {
keys, err := getKeys(ctx, conn, steps, req.SrcHost, req.TimeStamp)
if err != nil {
return err
}
// MAC and set authenticators inside request
payload := make([]byte, minSizeBaseReq(req))
serializeBaseRequest(payload, req)
req.Authenticators, err = computeAuthenticators(payload, keys)
return err
}
func createAuthsForE2EReservationSetup(ctx context.Context, conn DRKeyGetter,
req *E2EReservationSetup) error {
keys, err := getKeys(ctx, conn, req.Steps, req.SrcHost, req.TimeStamp)
if err != nil {
return err
}
payload := make([]byte, minSizeE2ESetupReq(req))
serializeE2EReservationSetup(payload, req)
req.Authenticators, err = computeAuthenticators(payload, keys)
return err
}
func validateResponseAuthenticators(ctx context.Context, conn DRKeyGetter,
res *E2EResponse, steps base.PathSteps, srcHost net.IP,
reqTimestamp time.Time) error {
if err := checkValidAuthenticatorsAndPath(res, steps); err != nil {
return err
}
if err := checkEqualLength(res.Authenticators, steps); err != nil {
return err
}
payloads, err := serializeResponse(res, steps, reqTimestamp)
if err != nil {
return err
}
return validateBasic(ctx, conn, payloads, res.Authenticators,
steps, srcHost, reqTimestamp)
}
func validateResponseErrorAuthenticators(ctx context.Context, conn DRKeyGetter,
res *E2EResponseError, steps base.PathSteps, srcHost net.IP,
reqTimestamp time.Time) error {
if err := checkValidAuthenticatorsAndPath(res, steps); err != nil {
return err
}
if err := checkEqualLength(res.Authenticators, steps); err != nil {
return err
}
// because a failure can originate at any on-path-AS, skip ASes before its origin:
originalAuthenticators := res.Authenticators
originalSteps := steps
defer func() {
res.Authenticators = originalAuthenticators
steps = originalSteps
}()
res.Authenticators = res.Authenticators[:res.FailedAS+1]
steps = steps[:res.FailedAS+1]
payloads := serializeResponseError(res, reqTimestamp)
return validateBasic(ctx, conn, payloads, res.Authenticators,
steps, srcHost, reqTimestamp)
}
func validateSetupErrorAuthenticators(ctx context.Context, conn DRKeyGetter,
res *E2ESetupError, steps base.PathSteps, srcHost net.IP,
reqTimestamp time.Time) error {
if err := checkValidAuthenticatorsAndPath(res, steps); err != nil {
return err
}
if err := checkEqualLength(res.Authenticators, steps); err != nil {
return err
}
// because a failure can originate at any on-path AS, skip ASes before its origin:
originalAuthenticators := res.Authenticators
originalSteps := steps.Copy()
defer func() {
res.Authenticators = originalAuthenticators
steps = originalSteps
}()
res.Authenticators = res.Authenticators[:res.FailedAS+1]
steps = steps[:res.FailedAS+1]
payloads := serializeSetupError(res, reqTimestamp)
return validateBasic(ctx, conn, payloads, res.Authenticators,
steps, srcHost, reqTimestamp)
}
func getKeys(ctx context.Context, conn DRKeyGetter, steps []base.PathStep,
srcHost net.IP, valTime time.Time) ([]drkey.Key, error) {
if len(steps) < 2 {
return nil, serrors.New("wrong path in request")
}
return getKeysWithLocalIA(ctx, conn, steps[1:], steps[0].IA, srcHost, valTime)
}
func getKeysWithLocalIA(ctx context.Context, conn DRKeyGetter, steps []base.PathStep,
localIA addr.IA, host net.IP, valtime time.Time) ([]drkey.Key, error) {
keys := make([]drkey.Key, len(steps))
for i, step := range steps {
key, err := conn.DRKeyGetASHostKey(ctx,
drkey.ASHostMeta{
Lvl2Meta: drkey.Lvl2Meta{
ProtoId: drkey.COLIBRI,
Validity: valtime,
SrcIA: step.IA,
DstIA: localIA,
},
DstHost: host.String(),
})
if err != nil {
return nil, err
}
keys[i] = key.Key
}
return keys, nil
}
func minSizeBaseReq(req *BaseRequest) int {
// fail to compile if these fields are not there
_ = req.Id
_ = req.Index
_ = req.TimeStamp
_ = req.SrcHost
_ = req.DstHost
return (6 + reservation.IDSuffixE2ELen) + 1 + 4 + // ID + index + time_stamp
16 + 16 // srcHost + dstHost
}
func minSizeE2ESetupReq(req *E2EReservationSetup) int {
// fail to compile if these fields are not there
_ = req.BaseRequest
_ = req.RequestedBW
_ = req.Segments
// BaseRequest + BW + Segment reservation IDs + Steps
return minSizeBaseReq(&req.BaseRequest) +
1 + len(req.Segments)*reservation.IDSegLen + req.Steps.Size()
}
func serializeBaseRequest(buff []byte, req *BaseRequest) |
func serializeE2EReservationSetup(buff []byte, req *E2EReservationSetup) {
minSize := minSizeE2ESetupReq(req)
assert(len(buff) >= minSize, "buffer too short (actual %d < minimum %d)",
len(buff), minSize)
offset := minSizeBaseReq(&req.BaseRequest)
serializeBaseRequest(buff[:offset], &req.BaseRequest)
// steps:
req.Steps.Serialize(buff[offset:])
offset += req.Steps.Size()
// BW and segments:
buff[offset] = byte(req.RequestedBW)
offset++
for _, id := range req.Segments {
id.Read(buff[offset:]) // ignore errors (length was already checked)
offset += reservation.IDSegLen
}
}
// serializeResponse returns the serialized versions of the response, one per AS in the path.
func serializeResponse(res *E2EResponse, steps base.PathSteps, timestamp time.Time) (
[][]byte, error) {
colPath, ok := res.ColibriPath.Dataplane().(snetpath.Colibri)
if !ok {
return nil, serrors.New("unsupported non colibri path type",
"path_type", common.TypeOf(res.ColibriPath.Dataplane()))
}
colibriPath, err := colPath.ToColibriPath()
if err != nil {
return nil, serrors.WrapStr("received invalid colibri path", err)
}
timestampBuff := make([]byte, 4)
binary.BigEndian.PutUint32(timestampBuff, util.TimeToSecs(timestamp))
allHfs := colibriPath.HopFields
payloads := make([][]byte, len(allHfs))
for i := range steps {
colibriPath.InfoField.HFCount = uint8(len(allHfs) - i)
colibriPath.HopFields = allHfs[i:]
payloads[i] = make([]byte, 1+4+colibriPath.Len()) // marker + timestamp + path
payloads[i][0] = 0 // success marker
copy(payloads[i][1:5], timestampBuff)
// TODO(juagargi) why are we serializing the transport path???
if err := colibriPath.SerializeTo(payloads[i][5:]); err != nil {
return nil, err
}
}
return payloads, nil
}
// serializeResponseError serializes the response error and returns one payload per AS in the path.
func serializeResponseError(res *E2EResponseError, timestamp time.Time) [][]byte {
message := ([]byte(res.Message))
// failure marker + timestamp + failedAS (as uint8) + message
payload := make([]byte, 1+4+1+len(message))
payload[0] = 1 // failure marker
binary.BigEndian.PutUint32(payload[1:5], util.TimeToSecs(timestamp))
payload[5] = uint8(res.FailedAS)
copy(payload[6:], message)
payloads := make([][]byte, len(res.Authenticators))
for i := range payloads {
payloads[i] | {
minSize := minSizeBaseReq(req)
assert(len(buff) >= minSize, "buffer too short (actual %d < minimum %d)",
len(buff), minSize)
offset := req.Id.Len()
// ID, index and timestamp:
req.Id.Read(buff[:offset]) // ignore errors (length was already checked)
buff[offset] = byte(req.Index)
offset++
binary.BigEndian.PutUint32(buff[offset:], util.TimeToSecs(req.TimeStamp))
offset += 4
// src and dst hosts:
copy(buff[offset:], req.SrcHost.To16())
offset += 16
copy(buff[offset:], req.DstHost.To16())
} | identifier_body |
drkey.go | error {
keys, err := getKeys(ctx, conn, steps, req.SrcHost, req.TimeStamp)
if err != nil {
return err
}
// MAC and set authenticators inside request
payload := make([]byte, minSizeBaseReq(req))
serializeBaseRequest(payload, req)
req.Authenticators, err = computeAuthenticators(payload, keys)
return err
}
func createAuthsForE2EReservationSetup(ctx context.Context, conn DRKeyGetter,
req *E2EReservationSetup) error {
keys, err := getKeys(ctx, conn, req.Steps, req.SrcHost, req.TimeStamp)
if err != nil {
return err
}
payload := make([]byte, minSizeE2ESetupReq(req))
serializeE2EReservationSetup(payload, req)
req.Authenticators, err = computeAuthenticators(payload, keys)
return err
}
func validateResponseAuthenticators(ctx context.Context, conn DRKeyGetter,
res *E2EResponse, steps base.PathSteps, srcHost net.IP,
reqTimestamp time.Time) error {
if err := checkValidAuthenticatorsAndPath(res, steps); err != nil {
return err
}
if err := checkEqualLength(res.Authenticators, steps); err != nil {
return err
}
payloads, err := serializeResponse(res, steps, reqTimestamp)
if err != nil {
return err
}
return validateBasic(ctx, conn, payloads, res.Authenticators,
steps, srcHost, reqTimestamp)
}
func validateResponseErrorAuthenticators(ctx context.Context, conn DRKeyGetter,
res *E2EResponseError, steps base.PathSteps, srcHost net.IP,
reqTimestamp time.Time) error {
if err := checkValidAuthenticatorsAndPath(res, steps); err != nil {
return err
}
if err := checkEqualLength(res.Authenticators, steps); err != nil {
return err
}
// because a failure can originate at any on-path-AS, skip ASes before its origin:
originalAuthenticators := res.Authenticators
originalSteps := steps
defer func() {
res.Authenticators = originalAuthenticators
steps = originalSteps
}()
res.Authenticators = res.Authenticators[:res.FailedAS+1]
steps = steps[:res.FailedAS+1]
payloads := serializeResponseError(res, reqTimestamp)
return validateBasic(ctx, conn, payloads, res.Authenticators,
steps, srcHost, reqTimestamp)
}
func validateSetupErrorAuthenticators(ctx context.Context, conn DRKeyGetter,
res *E2ESetupError, steps base.PathSteps, srcHost net.IP,
reqTimestamp time.Time) error {
if err := checkValidAuthenticatorsAndPath(res, steps); err != nil {
return err
}
if err := checkEqualLength(res.Authenticators, steps); err != nil {
return err
}
// because a failure can originate at any on-path AS, skip ASes before its origin:
originalAuthenticators := res.Authenticators
originalSteps := steps.Copy()
defer func() {
res.Authenticators = originalAuthenticators
steps = originalSteps
}() | return validateBasic(ctx, conn, payloads, res.Authenticators,
steps, srcHost, reqTimestamp)
}
func getKeys(ctx context.Context, conn DRKeyGetter, steps []base.PathStep,
srcHost net.IP, valTime time.Time) ([]drkey.Key, error) {
if len(steps) < 2 {
return nil, serrors.New("wrong path in request")
}
return getKeysWithLocalIA(ctx, conn, steps[1:], steps[0].IA, srcHost, valTime)
}
func getKeysWithLocalIA(ctx context.Context, conn DRKeyGetter, steps []base.PathStep,
localIA addr.IA, host net.IP, valtime time.Time) ([]drkey.Key, error) {
keys := make([]drkey.Key, len(steps))
for i, step := range steps {
key, err := conn.DRKeyGetASHostKey(ctx,
drkey.ASHostMeta{
Lvl2Meta: drkey.Lvl2Meta{
ProtoId: drkey.COLIBRI,
Validity: valtime,
SrcIA: step.IA,
DstIA: localIA,
},
DstHost: host.String(),
})
if err != nil {
return nil, err
}
keys[i] = key.Key
}
return keys, nil
}
func minSizeBaseReq(req *BaseRequest) int {
// fail to compile if these fields are not there
_ = req.Id
_ = req.Index
_ = req.TimeStamp
_ = req.SrcHost
_ = req.DstHost
return (6 + reservation.IDSuffixE2ELen) + 1 + 4 + // ID + index + time_stamp
16 + 16 // srcHost + dstHost
}
func minSizeE2ESetupReq(req *E2EReservationSetup) int {
// fail to compile if these fields are not there
_ = req.BaseRequest
_ = req.RequestedBW
_ = req.Segments
// BaseRequest + BW + Segment reservation IDs + Steps
return minSizeBaseReq(&req.BaseRequest) +
1 + len(req.Segments)*reservation.IDSegLen + req.Steps.Size()
}
func serializeBaseRequest(buff []byte, req *BaseRequest) {
minSize := minSizeBaseReq(req)
assert(len(buff) >= minSize, "buffer too short (actual %d < minimum %d)",
len(buff), minSize)
offset := req.Id.Len()
// ID, index and timestamp:
req.Id.Read(buff[:offset]) // ignore errors (length was already checked)
buff[offset] = byte(req.Index)
offset++
binary.BigEndian.PutUint32(buff[offset:], util.TimeToSecs(req.TimeStamp))
offset += 4
// src and dst hosts:
copy(buff[offset:], req.SrcHost.To16())
offset += 16
copy(buff[offset:], req.DstHost.To16())
}
func serializeE2EReservationSetup(buff []byte, req *E2EReservationSetup) {
minSize := minSizeE2ESetupReq(req)
assert(len(buff) >= minSize, "buffer too short (actual %d < minimum %d)",
len(buff), minSize)
offset := minSizeBaseReq(&req.BaseRequest)
serializeBaseRequest(buff[:offset], &req.BaseRequest)
// steps:
req.Steps.Serialize(buff[offset:])
offset += req.Steps.Size()
// BW and segments:
buff[offset] = byte(req.RequestedBW)
offset++
for _, id := range req.Segments {
id.Read(buff[offset:]) // ignore errors (length was already checked)
offset += reservation.IDSegLen
}
}
// serializeResponse returns the serialized versions of the response, one per AS in the path.
func serializeResponse(res *E2EResponse, steps base.PathSteps, timestamp time.Time) (
[][]byte, error) {
colPath, ok := res.ColibriPath.Dataplane().(snetpath.Colibri)
if !ok {
return nil, serrors.New("unsupported non colibri path type",
"path_type", common.TypeOf(res.ColibriPath.Dataplane()))
}
colibriPath, err := colPath.ToColibriPath()
if err != nil {
return nil, serrors.WrapStr("received invalid colibri path", err)
}
timestampBuff := make([]byte, 4)
binary.BigEndian.PutUint32(timestampBuff, util.TimeToSecs(timestamp))
allHfs := colibriPath.HopFields
payloads := make([][]byte, len(allHfs))
for i := range steps {
colibriPath.InfoField.HFCount = uint8(len(allHfs) - i)
colibriPath.HopFields = allHfs[i:]
payloads[i] = make([]byte, 1+4+colibriPath.Len()) // marker + timestamp + path
payloads[i][0] = 0 // success marker
copy(payloads[i][1:5], timestampBuff)
// TODO(juagargi) why are we serializing the transport path???
if err := colibriPath.SerializeTo(payloads[i][5:]); err != nil {
return nil, err
}
}
return payloads, nil
}
// serializeResponseError serializes the response error and returns one payload per AS in the path.
func serializeResponseError(res *E2EResponseError, timestamp time.Time) [][]byte {
message := ([]byte(res.Message))
// failure marker + timestamp + failedAS (as uint8) + message
payload := make([]byte, 1+4+1+len(message))
payload[0] = 1 // failure marker
binary.BigEndian.PutUint32(payload[1:5], util.TimeToSecs(timestamp))
payload[5] = uint8(res.FailedAS)
copy(payload[6:], message)
payloads := make([][]byte, len(res.Authenticators))
for i := range payloads {
payloads[i] | res.Authenticators = res.Authenticators[:res.FailedAS+1]
steps = steps[:res.FailedAS+1]
payloads := serializeSetupError(res, reqTimestamp) | random_line_split |
drkey.go | .PathSteps, srcHost net.IP,
reqTimestamp time.Time) error {
if err := checkValidAuthenticatorsAndPath(res, steps); err != nil {
return err
}
if err := checkEqualLength(res.Authenticators, steps); err != nil {
return err
}
// because a failure can originate at any on-path-AS, skip ASes before its origin:
originalAuthenticators := res.Authenticators
originalSteps := steps
defer func() {
res.Authenticators = originalAuthenticators
steps = originalSteps
}()
res.Authenticators = res.Authenticators[:res.FailedAS+1]
steps = steps[:res.FailedAS+1]
payloads := serializeResponseError(res, reqTimestamp)
return validateBasic(ctx, conn, payloads, res.Authenticators,
steps, srcHost, reqTimestamp)
}
func validateSetupErrorAuthenticators(ctx context.Context, conn DRKeyGetter,
res *E2ESetupError, steps base.PathSteps, srcHost net.IP,
reqTimestamp time.Time) error {
if err := checkValidAuthenticatorsAndPath(res, steps); err != nil {
return err
}
if err := checkEqualLength(res.Authenticators, steps); err != nil {
return err
}
// because a failure can originate at any on-path AS, skip ASes before its origin:
originalAuthenticators := res.Authenticators
originalSteps := steps.Copy()
defer func() {
res.Authenticators = originalAuthenticators
steps = originalSteps
}()
res.Authenticators = res.Authenticators[:res.FailedAS+1]
steps = steps[:res.FailedAS+1]
payloads := serializeSetupError(res, reqTimestamp)
return validateBasic(ctx, conn, payloads, res.Authenticators,
steps, srcHost, reqTimestamp)
}
func getKeys(ctx context.Context, conn DRKeyGetter, steps []base.PathStep,
srcHost net.IP, valTime time.Time) ([]drkey.Key, error) {
if len(steps) < 2 {
return nil, serrors.New("wrong path in request")
}
return getKeysWithLocalIA(ctx, conn, steps[1:], steps[0].IA, srcHost, valTime)
}
func getKeysWithLocalIA(ctx context.Context, conn DRKeyGetter, steps []base.PathStep,
localIA addr.IA, host net.IP, valtime time.Time) ([]drkey.Key, error) {
keys := make([]drkey.Key, len(steps))
for i, step := range steps {
key, err := conn.DRKeyGetASHostKey(ctx,
drkey.ASHostMeta{
Lvl2Meta: drkey.Lvl2Meta{
ProtoId: drkey.COLIBRI,
Validity: valtime,
SrcIA: step.IA,
DstIA: localIA,
},
DstHost: host.String(),
})
if err != nil {
return nil, err
}
keys[i] = key.Key
}
return keys, nil
}
func minSizeBaseReq(req *BaseRequest) int {
// fail to compile if these fields are not there
_ = req.Id
_ = req.Index
_ = req.TimeStamp
_ = req.SrcHost
_ = req.DstHost
return (6 + reservation.IDSuffixE2ELen) + 1 + 4 + // ID + index + time_stamp
16 + 16 // srcHost + dstHost
}
func minSizeE2ESetupReq(req *E2EReservationSetup) int {
// fail to compile if these fields are not there
_ = req.BaseRequest
_ = req.RequestedBW
_ = req.Segments
// BaseRequest + BW + Segment reservation IDs + Steps
return minSizeBaseReq(&req.BaseRequest) +
1 + len(req.Segments)*reservation.IDSegLen + req.Steps.Size()
}
func serializeBaseRequest(buff []byte, req *BaseRequest) {
minSize := minSizeBaseReq(req)
assert(len(buff) >= minSize, "buffer too short (actual %d < minimum %d)",
len(buff), minSize)
offset := req.Id.Len()
// ID, index and timestamp:
req.Id.Read(buff[:offset]) // ignore errors (length was already checked)
buff[offset] = byte(req.Index)
offset++
binary.BigEndian.PutUint32(buff[offset:], util.TimeToSecs(req.TimeStamp))
offset += 4
// src and dst hosts:
copy(buff[offset:], req.SrcHost.To16())
offset += 16
copy(buff[offset:], req.DstHost.To16())
}
func serializeE2EReservationSetup(buff []byte, req *E2EReservationSetup) {
minSize := minSizeE2ESetupReq(req)
assert(len(buff) >= minSize, "buffer too short (actual %d < minimum %d)",
len(buff), minSize)
offset := minSizeBaseReq(&req.BaseRequest)
serializeBaseRequest(buff[:offset], &req.BaseRequest)
// steps:
req.Steps.Serialize(buff[offset:])
offset += req.Steps.Size()
// BW and segments:
buff[offset] = byte(req.RequestedBW)
offset++
for _, id := range req.Segments {
id.Read(buff[offset:]) // ignore errors (length was already checked)
offset += reservation.IDSegLen
}
}
// serializeResponse returns the serialized versions of the response, one per AS in the path.
func serializeResponse(res *E2EResponse, steps base.PathSteps, timestamp time.Time) (
[][]byte, error) {
colPath, ok := res.ColibriPath.Dataplane().(snetpath.Colibri)
if !ok {
return nil, serrors.New("unsupported non colibri path type",
"path_type", common.TypeOf(res.ColibriPath.Dataplane()))
}
colibriPath, err := colPath.ToColibriPath()
if err != nil {
return nil, serrors.WrapStr("received invalid colibri path", err)
}
timestampBuff := make([]byte, 4)
binary.BigEndian.PutUint32(timestampBuff, util.TimeToSecs(timestamp))
allHfs := colibriPath.HopFields
payloads := make([][]byte, len(allHfs))
for i := range steps {
colibriPath.InfoField.HFCount = uint8(len(allHfs) - i)
colibriPath.HopFields = allHfs[i:]
payloads[i] = make([]byte, 1+4+colibriPath.Len()) // marker + timestamp + path
payloads[i][0] = 0 // success marker
copy(payloads[i][1:5], timestampBuff)
// TODO(juagargi) why are we serializing the transport path???
if err := colibriPath.SerializeTo(payloads[i][5:]); err != nil {
return nil, err
}
}
return payloads, nil
}
// serializeResponseError serializes the response error and returns one payload per AS in the path.
func serializeResponseError(res *E2EResponseError, timestamp time.Time) [][]byte {
message := ([]byte(res.Message))
// failure marker + timestamp + failedAS (as uint8) + message
payload := make([]byte, 1+4+1+len(message))
payload[0] = 1 // failure marker
binary.BigEndian.PutUint32(payload[1:5], util.TimeToSecs(timestamp))
payload[5] = uint8(res.FailedAS)
copy(payload[6:], message)
payloads := make([][]byte, len(res.Authenticators))
for i := range payloads {
payloads[i] = payload
}
return payloads
}
// serializeSetupError serializes the setup error and returns one payload per AS in the path.
func serializeSetupError(res *E2ESetupError, timestamp time.Time) [][]byte {
message := ([]byte(res.Message))
// failure marker + timestamp + failedAS (as uint8) + message
payload := make([]byte, 1+4+1+len(message))
payload[0] = 1 // failure marker
binary.BigEndian.PutUint32(payload[1:5], util.TimeToSecs(timestamp))
payload[5] = uint8(res.FailedAS)
copy(payload[6:], message)
payloads := make([][]byte, len(res.Authenticators))
for i := range payloads {
trail := res.AllocationTrail[i:]
// append trail to payload (convoluted due to the type cast to byte):
payloads[i] = append(payload, make([]byte, len(trail))...)
for j := range trail {
payloads[i][len(payload)+j] = byte(trail[j])
}
}
return payloads
}
func validateBasic(ctx context.Context, conn DRKeyGetter, payloads [][]byte,
authenticators [][]byte, steps base.PathSteps,
srcHost net.IP, valTime time.Time) error {
keys, err := getKeysWithLocalIA(ctx, conn, steps,
steps.SrcIA(), srcHost, valTime)
if err != nil {
return err
}
ok, err := validateAuthenticators(payloads, keys, authenticators)
if err != nil {
return err
}
if !ok | {
return serrors.New("validation failed for response")
} | conditional_block |
|
cli.rs | ,
count: u64,
bytes: u128,
}
impl TopicStat {
fn new(topic: &str) -> Self {
Self {
topic: topic.to_owned(),
count: 0,
bytes: 0,
}
}
fn count(&mut self, size: usize) {
self.bytes += size as u128;
self.count += 1;
}
}
//impl Ord for TopicStat {
//fn cmp(&self, other: &Self) -> core::cmp::Ordering {
//self.count.cmp(&other.count)
//}
//}
//impl PartialOrd for TopicStat {
//fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
//Some(self.cmp(other))
//}
//}
//impl PartialEq for TopicStat {
//fn eq(&self, other: &Self) -> bool {
//self.count == other.count
//}
//}
struct MessageTest {
name: String,
data: Vec<u8>,
iterations: u32,
}
#[allow(clippy::cast_sign_loss)]
#[allow(clippy::cast_possible_truncation)]
impl MessageTest {
fn new(name: &str, iterations: u32) -> Self {
let mut data = Vec::new();
let size = byte_unit::Byte::from_str(name).unwrap().get_bytes();
for i in 0..size {
data.push(i as u8);
}
Self {
name: name.to_owned(),
data,
iterations,
}
}
}
#[allow(clippy::too_many_lines)]
async fn benchmark(
config: &client::Config,
benchmark_workers: u32,
iterations: u32,
sub_node: Option<&String>,
) {
let it_total = benchmark_workers * iterations;
info!(
"Benchmarking, {} workers, {} iterations per worker...",
benchmark_workers, iterations
);
let mut rng = rand::thread_rng();
let mut workers = Vec::new();
for i in 0..benchmark_workers {
let mut client = client::Client::connect(config).await.unwrap();
let (data_channel, r_client) = if let Some(p) = sub_node {
let mut r_config = config.clone();
r_config.update_path(p);
let mut r_client = client::Client::connect(&r_config).await.unwrap();
(r_client.take_data_channel().unwrap(), Some(r_client))
} else {
(client.take_data_channel().unwrap(), None)
};
assert!(client.is_connected());
let bi: u32 = rng.gen();
workers.push(Arc::new(BenchmarkWorker::new(
format!("{}/{}", bi, i),
client,
r_client,
data_channel,
)));
}
let mut futures = Vec::new();
staged_benchmark_start!("subscribe");
for wrk in &workers {
assert!(wrk.is_connected());
let worker = wrk.clone();
let fut = tokio::spawn(async move {
for i in 0..iterations {
worker
.client
.subscribe(format!("benchmark/{}/{}", worker.id, i))
.await
.unwrap();
}
});
futures.push(fut);
}
for f in futures {
f.await.unwrap();
}
staged_benchmark_finish_current!(it_total);
let message_tests = vec![
MessageTest::new("10b", iterations),
MessageTest::new("1kb", iterations),
MessageTest::new("10kb", iterations),
MessageTest::new("100kb", iterations / 10),
MessageTest::new("1mb", iterations / 100),
];
for test in message_tests {
benchmark_message(
&format!("pub-{}", test.name),
&test.data,
&workers,
test.iterations,
false,
)
.await;
benchmark_message(
&format!("pub-read-{}", test.name),
&test.data,
&workers,
test.iterations,
true,
)
.await;
}
let mut futures = Vec::new();
staged_benchmark_start!("unsubscribe");
for wrk in &workers {
assert!(wrk.is_connected());
let worker = wrk.clone();
let fut = tokio::spawn(async move {
for i in 0..iterations {
worker
.client
.subscribe(format!("benchmark/{}/{}", worker.id, i))
.await
.unwrap();
}
});
futures.push(fut);
}
for f in futures {
f.await.unwrap();
}
staged_benchmark_finish_current!(it_total);
for wrk in &workers {
wrk.bye().await.unwrap();
assert!(!wrk.is_connected());
}
staged_benchmark_print!();
}
#[inline]
fn parse_topics(topic: Option<&String>) -> Vec<String> {
topic
.expect(ERR_TOPIC_NOT_SPECIFIED)
.split(',')
.into_iter()
.map(ToOwned::to_owned)
.collect::<Vec<String>>()
}
#[tokio::main(worker_threads = 1)]
async fn main() {
let opts = Opts::parse();
env_logger::Builder::new()
.target(env_logger::Target::Stdout)
.filter_level(if opts.benchmark || opts.top {
log::LevelFilter::Info
} else {
log::LevelFilter::Trace
})
.init();
let queue_size = if opts.benchmark { 256_000 } else { 4_096 };
let user = opts.user.unwrap_or_else(|| "".to_owned());
let password = opts.password.unwrap_or_else(|| "".to_owned());
let tls_ca = if let Some(cafile) = opts.tls_ca {
Some(tokio::fs::read_to_string(cafile).await.unwrap())
} else {
None
};
let mut config = client::Config::new(&opts.path)
.set_auth(&user, &password)
.set_queue_size(queue_size)
.set_timeout(Duration::from_secs_f64(opts.timeout))
.set_tls(opts.tls)
.set_tls_ca(tls_ca)
.build();
if opts.benchmark {
benchmark(
&config,
opts.benchmark_workers,
opts.benchmark_iterations,
opts.benchmark_cluster_sub.as_ref(),
)
.await;
} else if opts.top {
static SORT_MODE: atomic::AtomicU8 = atomic::AtomicU8::new(0);
macro_rules! cls {
() => {
print!("{esc}[2J{esc}[1;1H", esc = 27 as char);
};
}
let mut client = client::Client::connect(&config).await.unwrap();
let mut data_channel = client.take_data_channel().unwrap();
let mut topic_stats: BTreeMap<String, TopicStat> = BTreeMap::new();
client
.subscribe_bulk(parse_topics(opts.topic.as_ref()))
.await
.unwrap();
let client = Arc::new(client);
tokio::spawn(async move {
loop {
signal(SignalKind::interrupt()).unwrap().recv().await;
cls!();
client.bye().await.unwrap();
std::process::exit(0);
}
});
let mut last_refresh: Option<Instant> = None;
let show_step = Duration::from_secs(1);
let mut table = prepare_stat_table();
let getch = getch::Getch::new();
std::thread::spawn(move || loop {
let ch = getch.getch().unwrap();
match ch as char {
's' => {
let s = SORT_MODE.load(atomic::Ordering::SeqCst);
SORT_MODE.store(s ^ 1, atomic::Ordering::SeqCst);
}
_ => {}
}
});
table.add_row(row![' ', ' ', ' ']);
cls!();
table.printstd();
loop {
let message = data_channel.recv().await.unwrap();
let topic = message.topic();
if let Some(stat) = topic_stats.get_mut(topic) {
stat.count(message.data().len());
} else {
let mut stat = TopicStat::new(topic);
stat.count(message.data().len());
topic_stats.insert(topic.to_owned(), stat);
}
if let Some(last_refresh) = last_refresh {
if last_refresh.elapsed() < show_step {
continue;
}
}
last_refresh = Some(Instant::now());
let mut stats: Vec<&TopicStat> = topic_stats.values().collect();
stats.sort_by(|a, b| {
if SORT_MODE.load(atomic::Ordering::SeqCst) == 0 {
b.count.cmp(&a.count)
} else {
b.bytes.cmp(&a.bytes)
}
});
let (_, h) = term_size::dimensions().unwrap();
stats.truncate(h - 4);
let mut table = prepare_stat_table();
for s in stats {
let byte = byte_unit::Byte::from_bytes(s.bytes);
table.add_row(row![
s.topic,
s.count.to_formatted_string(&Locale::en).replace(',', "_"),
byte.get_appropriate_unit(false)
]);
}
cls!();
table.printstd();
}
} else {
if opts.message.is_some() {
config = config.disable_data_stream();
} | let mut client = client::Client::connect(&config).await.unwrap(); | random_line_split |
|
cli.rs | (
id: String,
client: client::Client,
r_client: Option<client::Client>,
data_channel: mpsc::Receiver<psrt::Message>,
) -> Self {
Self {
id,
client,
r_client,
data_channel: Arc::new(RwLock::new(data_channel)),
}
}
fn is_connected(&self) -> bool {
self.r_client.as_ref().map_or_else(
|| self.client.is_connected(),
|r| r.is_connected() && self.client.is_connected(),
)
}
async fn bye(&self) -> Result<(), psrt::Error> {
self.client.bye().await?;
if let Some(ref r) = self.r_client {
r.bye().await?;
}
Ok(())
}
}
#[allow(clippy::cast_possible_truncation)]
async fn benchmark_message(
name: &str,
message: &[u8],
workers: &[Arc<BenchmarkWorker>],
iterations: u32,
wait_read: bool,
) {
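// When wait_read is set, each worker also subscribes to its test topic and a reader task asserts that every published message arrives intact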
let mut futures = Vec::new();
let message = Arc::new(message.to_vec());
staged_benchmark_start!(name);
for wrk in workers {
assert!(wrk.is_connected());
let test_topic = format!("benchmark/{}/test/{}", wrk.id, name);
let worker = wrk.clone();
let test_msg = message.clone();
if wait_read {
if let Some(ref r_client) = wrk.r_client {
r_client.subscribe(test_topic.clone()).await.unwrap();
} else {
wrk.client.subscribe(test_topic.clone()).await.unwrap();
}
let data_fut = tokio::spawn(async move {
let mut channel = worker.data_channel.write().await;
for _ in 0..iterations {
let msg = channel.recv().await.unwrap();
assert_eq!(msg.data(), *test_msg);
}
});
futures.push(data_fut);
}
let worker = wrk.clone();
let test_msg = message.clone();
let fut = tokio::spawn(async move {
for _ in 0..iterations {
worker
.client
.publish(DEFAULT_PRIORITY, test_topic.clone(), (*test_msg).clone())
.await
.unwrap();
}
});
futures.push(fut);
}
for f in futures {
f.await.unwrap();
}
staged_benchmark_finish_current!(workers.len() as u32 * iterations);
for wrk in workers {
assert!(wrk.is_connected());
let test_topic = format!("benchmark/{}/test/{}", wrk.id, name);
wrk.client.unsubscribe(test_topic.clone()).await.unwrap();
}
}
fn prepare_stat_table() -> Table {
let mut table = Table::new();
let format = prettytable::format::FormatBuilder::new()
.column_separator(' ')
.borders(' ')
.separators(
&[prettytable::format::LinePosition::Title],
prettytable::format::LineSeparator::new('-', '-', '-', '-'),
)
.padding(0, 1)
.build();
table.set_format(format);
let titlevec: Vec<prettytable::Cell> = ["topic", "count", "bytes"]
.iter()
.map(|v| prettytable::Cell::new(v).style_spec("Fb"))
.collect();
table.set_titles(prettytable::Row::new(titlevec));
table
}
//#[derive(Eq)]
struct TopicStat {
topic: String,
count: u64,
bytes: u128,
}
impl TopicStat {
fn new(topic: &str) -> Self {
Self {
topic: topic.to_owned(),
count: 0,
bytes: 0,
}
}
fn count(&mut self, size: usize) {
self.bytes += size as u128;
self.count += 1;
}
}
//impl Ord for TopicStat {
//fn cmp(&self, other: &Self) -> core::cmp::Ordering {
//self.count.cmp(&other.count)
//}
//}
//impl PartialOrd for TopicStat {
//fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
//Some(self.cmp(other))
//}
//}
//impl PartialEq for TopicStat {
//fn eq(&self, other: &Self) -> bool {
//self.count == other.count
//}
//}
struct MessageTest {
name: String,
data: Vec<u8>,
iterations: u32,
}
#[allow(clippy::cast_sign_loss)]
#[allow(clippy::cast_possible_truncation)]
impl MessageTest {
fn new(name: &str, iterations: u32) -> Self {
let mut data = Vec::new();
let size = byte_unit::Byte::from_str(name).unwrap().get_bytes();
for i in 0..size {
data.push(i as u8);
}
Self {
name: name.to_owned(),
data,
iterations,
}
}
}
#[allow(clippy::too_many_lines)]
async fn benchmark(
config: &client::Config,
benchmark_workers: u32,
iterations: u32,
sub_node: Option<&String>,
) {
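// When sub_node is given, each worker publishes over its primary connection and reads the messages back from the node at that path; otherwise it reads from its own connection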
let it_total = benchmark_workers * iterations;
info!(
"Benchmarking, {} workers, {} iterations per worker...",
benchmark_workers, iterations
);
let mut rng = rand::thread_rng();
let mut workers = Vec::new();
for i in 0..benchmark_workers {
let mut client = client::Client::connect(config).await.unwrap();
let (data_channel, r_client) = if let Some(p) = sub_node {
let mut r_config = config.clone();
r_config.update_path(p);
let mut r_client = client::Client::connect(&r_config).await.unwrap();
(r_client.take_data_channel().unwrap(), Some(r_client))
} else {
(client.take_data_channel().unwrap(), None)
};
assert!(client.is_connected());
let bi: u32 = rng.gen();
workers.push(Arc::new(BenchmarkWorker::new(
format!("{}/{}", bi, i),
client,
r_client,
data_channel,
)));
}
let mut futures = Vec::new();
staged_benchmark_start!("subscribe");
for wrk in &workers {
assert!(wrk.is_connected());
let worker = wrk.clone();
let fut = tokio::spawn(async move {
for i in 0..iterations {
worker
.client
.subscribe(format!("benchmark/{}/{}", worker.id, i))
.await
.unwrap();
}
});
futures.push(fut);
}
for f in futures {
f.await.unwrap();
}
staged_benchmark_finish_current!(it_total);
let message_tests = vec![
MessageTest::new("10b", iterations),
MessageTest::new("1kb", iterations),
MessageTest::new("10kb", iterations),
MessageTest::new("100kb", iterations / 10),
MessageTest::new("1mb", iterations / 100),
];
for test in message_tests {
benchmark_message(
&format!("pub-{}", test.name),
&test.data,
&workers,
test.iterations,
false,
)
.await;
benchmark_message(
&format!("pub-read-{}", test.name),
&test.data,
&workers,
test.iterations,
true,
)
.await;
}
let mut futures = Vec::new();
staged_benchmark_start!("unsubscribe");
for wrk in &workers {
assert!(wrk.is_connected());
let worker = wrk.clone();
let fut = tokio::spawn(async move {
for i in 0..iterations {
worker
.client
.unsubscribe(format!("benchmark/{}/{}", worker.id, i))
.await
.unwrap();
}
});
futures.push(fut);
}
for f in futures {
f.await.unwrap();
}
staged_benchmark_finish_current!(it_total);
for wrk in &workers {
wrk.bye().await.unwrap();
assert!(!wrk.is_connected());
}
staged_benchmark_print!();
}
#[inline]
fn | (topic: Option<&String>) -> Vec<String> {
topic
.expect(ERR_TOPIC_NOT_SPECIFIED)
.split(',')
.into_iter()
.map(ToOwned::to_owned)
.collect::<Vec<String>>()
}
#[tokio::main(worker_threads = 1)]
async fn main() {
let opts = Opts::parse();
env_logger::Builder::new()
.target(env_logger::Target::Stdout)
.filter_level(if opts.benchmark || opts.top {
log::LevelFilter::Info
} else {
log::LevelFilter::Trace
})
.init();
let queue_size = if opts.benchmark { 256_000 } else { 4_096 };
let user = opts.user.unwrap_or_else(|| "".to_owned());
let password = opts.password.unwrap_or_else(|| "".to_owned());
let tls_ca = if let Some(cafile) = opts.tls_ca {
Some(tokio::fs::read_to_string(cafile).await.unwrap())
} else {
None
};
let mut config = client::Config::new(&opts.path)
.set | parse_topics | identifier_name |
cli.rs | client: client::Client,
r_client: Option<client::Client>,
data_channel: mpsc::Receiver<psrt::Message>,
) -> Self {
Self {
id,
client,
r_client,
data_channel: Arc::new(RwLock::new(data_channel)),
}
}
fn is_connected(&self) -> bool {
self.r_client.as_ref().map_or_else(
|| self.client.is_connected(),
|r| r.is_connected() && self.client.is_connected(),
)
}
async fn bye(&self) -> Result<(), psrt::Error> {
self.client.bye().await?;
if let Some(ref r) = self.r_client {
r.bye().await?;
}
Ok(())
}
}
#[allow(clippy::cast_possible_truncation)]
async fn benchmark_message(
name: &str,
message: &[u8],
workers: &[Arc<BenchmarkWorker>],
iterations: u32,
wait_read: bool,
) {
let mut futures = Vec::new();
let message = Arc::new(message.to_vec());
staged_benchmark_start!(name);
for wrk in workers {
assert!(wrk.is_connected());
let test_topic = format!("benchmark/{}/test/{}", wrk.id, name);
let worker = wrk.clone();
let test_msg = message.clone();
if wait_read {
if let Some(ref r_client) = wrk.r_client {
r_client.subscribe(test_topic.clone()).await.unwrap();
} else {
wrk.client.subscribe(test_topic.clone()).await.unwrap();
}
let data_fut = tokio::spawn(async move {
let mut channel = worker.data_channel.write().await;
for _ in 0..iterations {
let msg = channel.recv().await.unwrap();
assert_eq!(msg.data(), *test_msg);
}
});
futures.push(data_fut);
}
let worker = wrk.clone();
let test_msg = message.clone();
let fut = tokio::spawn(async move {
for _ in 0..iterations {
worker
.client
.publish(DEFAULT_PRIORITY, test_topic.clone(), (*test_msg).clone())
.await
.unwrap();
}
});
futures.push(fut);
}
for f in futures {
f.await.unwrap();
}
staged_benchmark_finish_current!(workers.len() as u32 * iterations);
for wrk in workers {
assert!(wrk.is_connected());
let test_topic = format!("benchmark/{}/test/{}", wrk.id, name);
wrk.client.unsubscribe(test_topic.clone()).await.unwrap();
}
}
fn prepare_stat_table() -> Table {
let mut table = Table::new();
let format = prettytable::format::FormatBuilder::new()
.column_separator(' ')
.borders(' ')
.separators(
&[prettytable::format::LinePosition::Title],
prettytable::format::LineSeparator::new('-', '-', '-', '-'),
)
.padding(0, 1)
.build();
table.set_format(format);
let titlevec: Vec<prettytable::Cell> = ["topic", "count", "bytes"]
.iter()
.map(|v| prettytable::Cell::new(v).style_spec("Fb"))
.collect();
table.set_titles(prettytable::Row::new(titlevec));
table
}
//#[derive(Eq)]
struct TopicStat {
topic: String,
count: u64,
bytes: u128,
}
impl TopicStat {
fn new(topic: &str) -> Self {
Self {
topic: topic.to_owned(),
count: 0,
bytes: 0,
}
}
fn count(&mut self, size: usize) {
self.bytes += size as u128;
self.count += 1;
}
}
//impl Ord for TopicStat {
//fn cmp(&self, other: &Self) -> core::cmp::Ordering {
//self.count.cmp(&other.count)
//}
//}
//impl PartialOrd for TopicStat {
//fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
//Some(self.cmp(other))
//}
//}
//impl PartialEq for TopicStat {
//fn eq(&self, other: &Self) -> bool {
//self.count == other.count
//}
//}
struct MessageTest {
name: String,
data: Vec<u8>,
iterations: u32,
}
#[allow(clippy::cast_sign_loss)]
#[allow(clippy::cast_possible_truncation)]
impl MessageTest {
fn new(name: &str, iterations: u32) -> Self {
let mut data = Vec::new();
let size = byte_unit::Byte::from_str(name).unwrap().get_bytes();
for i in 0..size {
data.push(i as u8);
}
Self {
name: name.to_owned(),
data,
iterations,
}
}
}
#[allow(clippy::too_many_lines)]
async fn benchmark(
config: &client::Config,
benchmark_workers: u32,
iterations: u32,
sub_node: Option<&String>,
) {
let it_total = benchmark_workers * iterations;
info!(
"Benchmarking, {} workers, {} iterations per worker...",
benchmark_workers, iterations
);
let mut rng = rand::thread_rng();
let mut workers = Vec::new();
for i in 0..benchmark_workers {
let mut client = client::Client::connect(config).await.unwrap();
let (data_channel, r_client) = if let Some(p) = sub_node {
let mut r_config = config.clone();
r_config.update_path(p);
let mut r_client = client::Client::connect(&r_config).await.unwrap();
(r_client.take_data_channel().unwrap(), Some(r_client))
} else {
(client.take_data_channel().unwrap(), None)
};
assert!(client.is_connected());
let bi: u32 = rng.gen();
workers.push(Arc::new(BenchmarkWorker::new(
format!("{}/{}", bi, i),
client,
r_client,
data_channel,
)));
}
let mut futures = Vec::new();
staged_benchmark_start!("subscribe");
for wrk in &workers {
assert!(wrk.is_connected());
let worker = wrk.clone();
let fut = tokio::spawn(async move {
for i in 0..iterations {
worker
.client
.subscribe(format!("benchmark/{}/{}", worker.id, i))
.await
.unwrap();
}
});
futures.push(fut);
}
for f in futures {
f.await.unwrap();
}
staged_benchmark_finish_current!(it_total);
let message_tests = vec![
MessageTest::new("10b", iterations),
MessageTest::new("1kb", iterations),
MessageTest::new("10kb", iterations),
MessageTest::new("100kb", iterations / 10),
MessageTest::new("1mb", iterations / 100),
];
for test in message_tests {
benchmark_message(
&format!("pub-{}", test.name),
&test.data,
&workers,
test.iterations,
false,
)
.await;
benchmark_message(
&format!("pub-read-{}", test.name),
&test.data,
&workers,
test.iterations,
true,
)
.await;
}
let mut futures = Vec::new();
staged_benchmark_start!("unsubscribe");
for wrk in &workers {
assert!(wrk.is_connected());
let worker = wrk.clone();
let fut = tokio::spawn(async move {
for i in 0..iterations {
worker
.client
.unsubscribe(format!("benchmark/{}/{}", worker.id, i))
.await
.unwrap();
}
});
futures.push(fut);
}
for f in futures {
f.await.unwrap();
}
staged_benchmark_finish_current!(it_total);
for wrk in &workers {
wrk.bye().await.unwrap();
assert!(!wrk.is_connected());
}
staged_benchmark_print!();
}
#[inline]
fn parse_topics(topic: Option<&String>) -> Vec<String> {
topic
.expect(ERR_TOPIC_NOT_SPECIFIED)
.split(',')
.into_iter()
.map(ToOwned::to_owned)
.collect::<Vec<String>>()
}
#[tokio::main(worker_threads = 1)]
async fn main() | {
let opts = Opts::parse();
env_logger::Builder::new()
.target(env_logger::Target::Stdout)
.filter_level(if opts.benchmark || opts.top {
log::LevelFilter::Info
} else {
log::LevelFilter::Trace
})
.init();
let queue_size = if opts.benchmark { 256_000 } else { 4_096 };
let user = opts.user.unwrap_or_else(|| "".to_owned());
let password = opts.password.unwrap_or_else(|| "".to_owned());
let tls_ca = if let Some(cafile) = opts.tls_ca {
Some(tokio::fs::read_to_string(cafile).await.unwrap())
} else {
None
};
let mut config = client::Config::new(&opts.path)
.set_auth(&user, &password) | identifier_body |
|
cli.rs | (
id: String,
client: client::Client,
r_client: Option<client::Client>,
data_channel: mpsc::Receiver<psrt::Message>,
) -> Self {
Self {
id,
client,
r_client,
data_channel: Arc::new(RwLock::new(data_channel)),
}
}
fn is_connected(&self) -> bool {
self.r_client.as_ref().map_or_else(
|| self.client.is_connected(),
|r| r.is_connected() && self.client.is_connected(),
)
}
async fn bye(&self) -> Result<(), psrt::Error> {
self.client.bye().await?;
if let Some(ref r) = self.r_client {
r.bye().await?;
}
Ok(())
}
}
#[allow(clippy::cast_possible_truncation)]
async fn benchmark_message(
name: &str,
message: &[u8],
workers: &[Arc<BenchmarkWorker>],
iterations: u32,
wait_read: bool,
) {
let mut futures = Vec::new();
let message = Arc::new(message.to_vec());
staged_benchmark_start!(name);
for wrk in workers {
assert!(wrk.is_connected());
let test_topic = format!("benchmark/{}/test/{}", wrk.id, name);
let worker = wrk.clone();
let test_msg = message.clone();
if wait_read {
if let Some(ref r_client) = wrk.r_client {
r_client.subscribe(test_topic.clone()).await.unwrap();
} else {
wrk.client.subscribe(test_topic.clone()).await.unwrap();
}
let data_fut = tokio::spawn(async move {
let mut channel = worker.data_channel.write().await;
for _ in 0..iterations {
let msg = channel.recv().await.unwrap();
assert_eq!(msg.data(), *test_msg);
}
});
futures.push(data_fut);
}
let worker = wrk.clone();
let test_msg = message.clone();
let fut = tokio::spawn(async move {
for _ in 0..iterations {
worker
.client
.publish(DEFAULT_PRIORITY, test_topic.clone(), (*test_msg).clone())
.await
.unwrap();
}
});
futures.push(fut);
}
for f in futures {
f.await.unwrap();
}
staged_benchmark_finish_current!(workers.len() as u32 * iterations);
for wrk in workers {
assert!(wrk.is_connected());
let test_topic = format!("benchmark/{}/test/{}", wrk.id, name);
wrk.client.unsubscribe(test_topic.clone()).await.unwrap();
}
}
fn prepare_stat_table() -> Table {
let mut table = Table::new();
let format = prettytable::format::FormatBuilder::new()
.column_separator(' ')
.borders(' ')
.separators(
&[prettytable::format::LinePosition::Title],
prettytable::format::LineSeparator::new('-', '-', '-', '-'),
)
.padding(0, 1)
.build();
table.set_format(format);
let titlevec: Vec<prettytable::Cell> = ["topic", "count", "bytes"]
.iter()
.map(|v| prettytable::Cell::new(v).style_spec("Fb"))
.collect();
table.set_titles(prettytable::Row::new(titlevec));
table
}
//#[derive(Eq)]
struct TopicStat {
topic: String,
count: u64,
bytes: u128,
}
impl TopicStat {
fn new(topic: &str) -> Self {
Self {
topic: topic.to_owned(),
count: 0,
bytes: 0,
}
}
fn count(&mut self, size: usize) {
self.bytes += size as u128;
self.count += 1;
}
}
//impl Ord for TopicStat {
//fn cmp(&self, other: &Self) -> core::cmp::Ordering {
//self.count.cmp(&other.count)
//}
//}
//impl PartialOrd for TopicStat {
//fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
//Some(self.cmp(other))
//}
//}
//impl PartialEq for TopicStat {
//fn eq(&self, other: &Self) -> bool {
//self.count == other.count
//}
//}
struct MessageTest {
name: String,
data: Vec<u8>,
iterations: u32,
}
#[allow(clippy::cast_sign_loss)]
#[allow(clippy::cast_possible_truncation)]
impl MessageTest {
fn new(name: &str, iterations: u32) -> Self {
let mut data = Vec::new();
let size = byte_unit::Byte::from_str(name).unwrap().get_bytes();
for i in 0..size {
data.push(i as u8);
}
Self {
name: name.to_owned(),
data,
iterations,
}
}
}
#[allow(clippy::too_many_lines)]
async fn benchmark(
config: &client::Config,
benchmark_workers: u32,
iterations: u32,
sub_node: Option<&String>,
) {
let it_total = benchmark_workers * iterations;
info!(
"Benchmarking, {} workers, {} iterations per worker...",
benchmark_workers, iterations
);
let mut rng = rand::thread_rng();
let mut workers = Vec::new();
for i in 0..benchmark_workers {
let mut client = client::Client::connect(config).await.unwrap();
let (data_channel, r_client) = if let Some(p) = sub_node {
let mut r_config = config.clone();
r_config.update_path(p);
let mut r_client = client::Client::connect(&r_config).await.unwrap();
(r_client.take_data_channel().unwrap(), Some(r_client))
} else | ;
assert!(client.is_connected());
let bi: u32 = rng.gen();
workers.push(Arc::new(BenchmarkWorker::new(
format!("{}/{}", bi, i),
client,
r_client,
data_channel,
)));
}
let mut futures = Vec::new();
staged_benchmark_start!("subscribe");
for wrk in &workers {
assert!(wrk.is_connected());
let worker = wrk.clone();
let fut = tokio::spawn(async move {
for i in 0..iterations {
worker
.client
.subscribe(format!("benchmark/{}/{}", worker.id, i))
.await
.unwrap();
}
});
futures.push(fut);
}
for f in futures {
f.await.unwrap();
}
staged_benchmark_finish_current!(it_total);
let message_tests = vec![
MessageTest::new("10b", iterations),
MessageTest::new("1kb", iterations),
MessageTest::new("10kb", iterations),
MessageTest::new("100kb", iterations / 10),
MessageTest::new("1mb", iterations / 100),
];
for test in message_tests {
benchmark_message(
&format!("pub-{}", test.name),
&test.data,
&workers,
test.iterations,
false,
)
.await;
benchmark_message(
&format!("pub-read-{}", test.name),
&test.data,
&workers,
test.iterations,
true,
)
.await;
}
let mut futures = Vec::new();
staged_benchmark_start!("unsubscribe");
for wrk in &workers {
assert!(wrk.is_connected());
let worker = wrk.clone();
let fut = tokio::spawn(async move {
for i in 0..iterations {
worker
.client
.unsubscribe(format!("benchmark/{}/{}", worker.id, i))
.await
.unwrap();
}
});
futures.push(fut);
}
for f in futures {
f.await.unwrap();
}
staged_benchmark_finish_current!(it_total);
for wrk in &workers {
wrk.bye().await.unwrap();
assert!(!wrk.is_connected());
}
staged_benchmark_print!();
}
#[inline]
fn parse_topics(topic: Option<&String>) -> Vec<String> {
topic
.expect(ERR_TOPIC_NOT_SPECIFIED)
.split(',')
.into_iter()
.map(ToOwned::to_owned)
.collect::<Vec<String>>()
}
#[tokio::main(worker_threads = 1)]
async fn main() {
let opts = Opts::parse();
env_logger::Builder::new()
.target(env_logger::Target::Stdout)
.filter_level(if opts.benchmark || opts.top {
log::LevelFilter::Info
} else {
log::LevelFilter::Trace
})
.init();
let queue_size = if opts.benchmark { 256_000 } else { 4_096 };
let user = opts.user.unwrap_or_else(|| "".to_owned());
let password = opts.password.unwrap_or_else(|| "".to_owned());
let tls_ca = if let Some(cafile) = opts.tls_ca {
Some(tokio::fs::read_to_string(cafile).await.unwrap())
} else {
None
};
let mut config = client::Config::new(&opts.path)
. | {
(client.take_data_channel().unwrap(), None)
} | conditional_block |
deephash_test.go | (b []byte) []byte {
return append(b, p...)
}
func TestHash(t *testing.T) {
type tuple [2]interface{}
type iface struct{ X interface{} }
type scalars struct {
I8 int8
I16 int16
I32 int32
I64 int64
I int
U8 uint8
U16 uint16
U32 uint32
U64 uint64
U uint
UP uintptr
F32 float32
F64 float64
C64 complex64
C128 complex128
}
type MyBool bool
type MyHeader tar.Header
tests := []struct {
in tuple
wantEq bool
}{
{in: tuple{false, true}, wantEq: false},
{in: tuple{true, true}, wantEq: true},
{in: tuple{false, false}, wantEq: true},
{
in: tuple{
scalars{-8, -16, -32, -64, -1234, 8, 16, 32, 64, 1234, 5678, 32.32, 64.64, 32 + 32i, 64 + 64i},
scalars{-8, -16, -32, -64, -1234, 8, 16, 32, 64, 1234, 5678, 32.32, 64.64, 32 + 32i, 64 + 64i},
},
wantEq: true,
},
{in: tuple{scalars{I8: math.MinInt8}, scalars{I8: math.MinInt8 / 2}}, wantEq: false},
{in: tuple{scalars{I16: math.MinInt16}, scalars{I16: math.MinInt16 / 2}}, wantEq: false},
{in: tuple{scalars{I32: math.MinInt32}, scalars{I32: math.MinInt32 / 2}}, wantEq: false},
{in: tuple{scalars{I64: math.MinInt64}, scalars{I64: math.MinInt64 / 2}}, wantEq: false},
{in: tuple{scalars{I: -1234}, scalars{I: -1234 / 2}}, wantEq: false},
{in: tuple{scalars{U8: math.MaxUint8}, scalars{U8: math.MaxUint8 / 2}}, wantEq: false},
{in: tuple{scalars{U16: math.MaxUint16}, scalars{U16: math.MaxUint16 / 2}}, wantEq: false},
{in: tuple{scalars{U32: math.MaxUint32}, scalars{U32: math.MaxUint32 / 2}}, wantEq: false},
{in: tuple{scalars{U64: math.MaxUint64}, scalars{U64: math.MaxUint64 / 2}}, wantEq: false},
{in: tuple{scalars{U: 1234}, scalars{U: 1234 / 2}}, wantEq: false},
{in: tuple{scalars{UP: 5678}, scalars{UP: 5678 / 2}}, wantEq: false},
{in: tuple{scalars{F32: 32.32}, scalars{F32: math.Nextafter32(32.32, 0)}}, wantEq: false},
{in: tuple{scalars{F64: 64.64}, scalars{F64: math.Nextafter(64.64, 0)}}, wantEq: false},
{in: tuple{scalars{F32: float32(math.NaN())}, scalars{F32: float32(math.NaN())}}, wantEq: true},
{in: tuple{scalars{F64: float64(math.NaN())}, scalars{F64: float64(math.NaN())}}, wantEq: true},
{in: tuple{scalars{C64: 32 + 32i}, scalars{C64: complex(math.Nextafter32(32, 0), 32)}}, wantEq: false},
{in: tuple{scalars{C128: 64 + 64i}, scalars{C128: complex(math.Nextafter(64, 0), 64)}}, wantEq: false},
{in: tuple{[]appendBytes{{}, {0, 0, 0, 0, 0, 0, 0, 1}}, []appendBytes{{}, {0, 0, 0, 0, 0, 0, 0, 1}}}, wantEq: true},
{in: tuple{[]appendBytes{{}, {0, 0, 0, 0, 0, 0, 0, 1}}, []appendBytes{{0, 0, 0, 0, 0, 0, 0, 1}, {}}}, wantEq: false},
{in: tuple{iface{MyBool(true)}, iface{MyBool(true)}}, wantEq: true},
{in: tuple{iface{true}, iface{MyBool(true)}}, wantEq: false},
{in: tuple{iface{MyHeader{}}, iface{MyHeader{}}}, wantEq: true},
{in: tuple{iface{MyHeader{}}, iface{tar.Header{}}}, wantEq: false},
{in: tuple{iface{&MyHeader{}}, iface{&MyHeader{}}}, wantEq: true},
{in: tuple{iface{&MyHeader{}}, iface{&tar.Header{}}}, wantEq: false},
{in: tuple{iface{[]map[string]MyBool{}}, iface{[]map[string]MyBool{}}}, wantEq: true},
{in: tuple{iface{[]map[string]bool{}}, iface{[]map[string]MyBool{}}}, wantEq: false},
{
in: func() tuple {
i1 := 1
i2 := 2
v1 := [3]*int{&i1, &i2, &i1}
v2 := [3]*int{&i1, &i2, &i2}
return tuple{v1, v2}
}(),
wantEq: false,
},
}
for _, tt := range tests {
gotEq := Hash(tt.in[0]) == Hash(tt.in[1])
if gotEq != tt.wantEq {
t.Errorf("(Hash(%v) == Hash(%v)) = %v, want %v", tt.in[0], tt.in[1], gotEq, tt.wantEq)
}
}
}
func TestDeepHash(t *testing.T) {
// v contains the types of values we care about for our current callers.
// Mostly we're just testing that we don't panic on handled types.
v := getVal()
hash1 := Hash(v)
t.Logf("hash: %v", hash1)
for i := 0; i < 20; i++ {
hash2 := Hash(getVal())
if hash1 != hash2 {
t.Error("second hash didn't match")
}
}
}
func getVal() []interface{} {
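// getVal builds a grab-bag of wgcfg, router, netaddr and tailcfg values; Hash must handle all of them without panicking and produce a stable result.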
return []interface{}{
&wgcfg.Config{
Name: "foo",
Addresses: []netaddr.IPPrefix{netaddr.IPPrefixFrom(netaddr.IPFrom16([16]byte{3: 3}), 5)},
Peers: []wgcfg.Peer{
{
PublicKey: key.NodePublic{},
},
},
},
&router.Config{
Routes: []netaddr.IPPrefix{
netaddr.MustParseIPPrefix("1.2.3.0/24"),
netaddr.MustParseIPPrefix("1234::/64"),
},
},
map[dnsname.FQDN][]netaddr.IP{
dnsname.FQDN("a."): {netaddr.MustParseIP("1.2.3.4"), netaddr.MustParseIP("4.3.2.1")},
dnsname.FQDN("b."): {netaddr.MustParseIP("8.8.8.8"), netaddr.MustParseIP("9.9.9.9")},
dnsname.FQDN("c."): {netaddr.MustParseIP("6.6. | AppendTo | identifier_name |
|
deephash_test.go | tuple{[]appendBytes{{}, {0, 0, 0, 0, 0, 0, 0, 1}}, []appendBytes{{}, {0, 0, 0, 0, 0, 0, 0, 1}}}, wantEq: true},
{in: tuple{[]appendBytes{{}, {0, 0, 0, 0, 0, 0, 0, 1}}, []appendBytes{{0, 0, 0, 0, 0, 0, 0, 1}, {}}}, wantEq: false},
{in: tuple{iface{MyBool(true)}, iface{MyBool(true)}}, wantEq: true},
{in: tuple{iface{true}, iface{MyBool(true)}}, wantEq: false},
{in: tuple{iface{MyHeader{}}, iface{MyHeader{}}}, wantEq: true},
{in: tuple{iface{MyHeader{}}, iface{tar.Header{}}}, wantEq: false},
{in: tuple{iface{&MyHeader{}}, iface{&MyHeader{}}}, wantEq: true},
{in: tuple{iface{&MyHeader{}}, iface{&tar.Header{}}}, wantEq: false},
{in: tuple{iface{[]map[string]MyBool{}}, iface{[]map[string]MyBool{}}}, wantEq: true},
{in: tuple{iface{[]map[string]bool{}}, iface{[]map[string]MyBool{}}}, wantEq: false},
{
in: func() tuple {
i1 := 1
i2 := 2
v1 := [3]*int{&i1, &i2, &i1}
v2 := [3]*int{&i1, &i2, &i2}
return tuple{v1, v2}
}(),
wantEq: false,
},
}
for _, tt := range tests {
gotEq := Hash(tt.in[0]) == Hash(tt.in[1])
if gotEq != tt.wantEq {
t.Errorf("(Hash(%v) == Hash(%v)) = %v, want %v", tt.in[0], tt.in[1], gotEq, tt.wantEq)
}
}
}
func TestDeepHash(t *testing.T) {
// v contains the types of values we care about for our current callers.
// Mostly we're just testing that we don't panic on handled types.
v := getVal()
hash1 := Hash(v)
t.Logf("hash: %v", hash1)
for i := 0; i < 20; i++ {
hash2 := Hash(getVal())
if hash1 != hash2 {
t.Error("second hash didn't match")
}
}
}
func getVal() []interface{} {
return []interface{}{
&wgcfg.Config{
Name: "foo",
Addresses: []netaddr.IPPrefix{netaddr.IPPrefixFrom(netaddr.IPFrom16([16]byte{3: 3}), 5)},
Peers: []wgcfg.Peer{
{
PublicKey: key.NodePublic{},
},
},
},
&router.Config{
Routes: []netaddr.IPPrefix{
netaddr.MustParseIPPrefix("1.2.3.0/24"),
netaddr.MustParseIPPrefix("1234::/64"),
},
},
map[dnsname.FQDN][]netaddr.IP{
dnsname.FQDN("a."): {netaddr.MustParseIP("1.2.3.4"), netaddr.MustParseIP("4.3.2.1")},
dnsname.FQDN("b."): {netaddr.MustParseIP("8.8.8.8"), netaddr.MustParseIP("9.9.9.9")},
dnsname.FQDN("c."): {netaddr.MustParseIP("6.6.6.6"), netaddr.MustParseIP("7.7.7.7")},
dnsname.FQDN("d."): {netaddr.MustParseIP("6.7.6.6"), netaddr.MustParseIP("7.7.7.8")},
dnsname.FQDN("e."): {netaddr.MustParseIP("6.8.6.6"), netaddr.MustParseIP("7.7.7.9")},
dnsname.FQDN("f."): {netaddr.MustParseIP("6.9.6.6"), netaddr.MustParseIP("7.7.7.0")},
},
map[dnsname.FQDN][]netaddr.IPPort{
dnsname.FQDN("a."): {netaddr.MustParseIPPort("1.2.3.4:11"), netaddr.MustParseIPPort("4.3.2.1:22")},
dnsname.FQDN("b."): {netaddr.MustParseIPPort("8.8.8.8:11"), netaddr.MustParseIPPort("9.9.9.9:22")},
dnsname.FQDN("c."): {netaddr.MustParseIPPort("8.8.8.8:12"), netaddr.MustParseIPPort("9.9.9.9:23")},
dnsname.FQDN("d."): {netaddr.MustParseIPPort("8.8.8.8:13"), netaddr.MustParseIPPort("9.9.9.9:24")},
dnsname.FQDN("e."): {netaddr.MustParseIPPort("8.8.8.8:14"), netaddr.MustParseIPPort("9.9.9.9:25")},
},
map[key.DiscoPublic]bool{
key.DiscoPublicFromRaw32(mem.B([]byte{1: 1, 31: 0})): true,
key.DiscoPublicFromRaw32(mem.B([]byte{1: 2, 31: 0})): false,
key.DiscoPublicFromRaw32(mem.B([]byte{1: 3, 31: 0})): true,
key.DiscoPublicFromRaw32(mem.B([]byte{1: 4, 31: 0})): false,
},
&tailcfg.MapResponse{
DERPMap: &tailcfg.DERPMap{
Regions: map[int]*tailcfg.DERPRegion{
1: &tailcfg.DERPRegion{
RegionID: 1,
RegionCode: "foo",
Nodes: []*tailcfg.DERPNode{
{
Name: "n1",
RegionID: 1,
HostName: "foo.com",
},
{
Name: "n2",
RegionID: 1,
HostName: "bar.com",
},
},
},
},
},
DNSConfig: &tailcfg.DNSConfig{
Resolvers: []dnstype.Resolver{
{Addr: "10.0.0.1"},
},
},
PacketFilter: []tailcfg.FilterRule{
{
SrcIPs: []string{"1.2.3.4"},
DstPorts: []tailcfg.NetPortRange{
{
IP: "1.2.3.4/32",
Ports: tailcfg.PortRange{First: 1, Last: 2},
},
},
},
},
Peers: []*tailcfg.Node{
{
ID: 1,
},
{
ID: 2,
},
},
UserProfiles: []tailcfg.UserProfile{
{ID: 1, LoginName: "[email protected]"},
{ID: 2, LoginName: "[email protected]"},
},
},
filter.Match{
IPProto: []ipproto.Proto{1, 2, 3},
},
}
}
var sink = Hash("foo")
func BenchmarkHash(b *testing.B) {
b.ReportAllocs()
v := getVal()
for i := 0; i < b.N; i++ {
sink = Hash(v)
}
}
func TestHashMapAcyclic(t *testing.T) {
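// Hash the same map 20 times; Go's randomized map iteration order must not change the serialized form, so exactly one distinct result is expected.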
m := map[int]string{}
for i := 0; i < 100; i++ {
m[i] = fmt.Sprint(i)
}
got := map[string]bool{}
var buf bytes.Buffer
bw := bufio.NewWriter(&buf)
for i := 0; i < 20; i++ {
v := reflect.ValueOf(m)
buf.Reset()
bw.Reset(&buf)
h := &hasher{bw: bw}
h.hashMap(v)
if got[string(buf.Bytes())] {
continue
}
got[string(buf.Bytes())] = true
} | if len(got) != 1 {
t.Errorf("got %d results; want 1", len(got))
}
}
| random_line_split |
|
deephash_test.go | i1, &i2, &i1}
v2 := [3]*int{&i1, &i2, &i2}
return tuple{v1, v2}
}(),
wantEq: false,
},
}
for _, tt := range tests {
gotEq := Hash(tt.in[0]) == Hash(tt.in[1])
if gotEq != tt.wantEq {
t.Errorf("(Hash(%v) == Hash(%v)) = %v, want %v", tt.in[0], tt.in[1], gotEq, tt.wantEq)
}
}
}
func TestDeepHash(t *testing.T) {
// v contains the types of values we care about for our current callers.
// Mostly we're just testing that we don't panic on handled types.
v := getVal()
hash1 := Hash(v)
t.Logf("hash: %v", hash1)
for i := 0; i < 20; i++ {
hash2 := Hash(getVal())
if hash1 != hash2 {
t.Error("second hash didn't match")
}
}
}
func getVal() []interface{} {
return []interface{}{
&wgcfg.Config{
Name: "foo",
Addresses: []netaddr.IPPrefix{netaddr.IPPrefixFrom(netaddr.IPFrom16([16]byte{3: 3}), 5)},
Peers: []wgcfg.Peer{
{
PublicKey: key.NodePublic{},
},
},
},
&router.Config{
Routes: []netaddr.IPPrefix{
netaddr.MustParseIPPrefix("1.2.3.0/24"),
netaddr.MustParseIPPrefix("1234::/64"),
},
},
map[dnsname.FQDN][]netaddr.IP{
dnsname.FQDN("a."): {netaddr.MustParseIP("1.2.3.4"), netaddr.MustParseIP("4.3.2.1")},
dnsname.FQDN("b."): {netaddr.MustParseIP("8.8.8.8"), netaddr.MustParseIP("9.9.9.9")},
dnsname.FQDN("c."): {netaddr.MustParseIP("6.6.6.6"), netaddr.MustParseIP("7.7.7.7")},
dnsname.FQDN("d."): {netaddr.MustParseIP("6.7.6.6"), netaddr.MustParseIP("7.7.7.8")},
dnsname.FQDN("e."): {netaddr.MustParseIP("6.8.6.6"), netaddr.MustParseIP("7.7.7.9")},
dnsname.FQDN("f."): {netaddr.MustParseIP("6.9.6.6"), netaddr.MustParseIP("7.7.7.0")},
},
map[dnsname.FQDN][]netaddr.IPPort{
dnsname.FQDN("a."): {netaddr.MustParseIPPort("1.2.3.4:11"), netaddr.MustParseIPPort("4.3.2.1:22")},
dnsname.FQDN("b."): {netaddr.MustParseIPPort("8.8.8.8:11"), netaddr.MustParseIPPort("9.9.9.9:22")},
dnsname.FQDN("c."): {netaddr.MustParseIPPort("8.8.8.8:12"), netaddr.MustParseIPPort("9.9.9.9:23")},
dnsname.FQDN("d."): {netaddr.MustParseIPPort("8.8.8.8:13"), netaddr.MustParseIPPort("9.9.9.9:24")},
dnsname.FQDN("e."): {netaddr.MustParseIPPort("8.8.8.8:14"), netaddr.MustParseIPPort("9.9.9.9:25")},
},
map[key.DiscoPublic]bool{
key.DiscoPublicFromRaw32(mem.B([]byte{1: 1, 31: 0})): true,
key.DiscoPublicFromRaw32(mem.B([]byte{1: 2, 31: 0})): false,
key.DiscoPublicFromRaw32(mem.B([]byte{1: 3, 31: 0})): true,
key.DiscoPublicFromRaw32(mem.B([]byte{1: 4, 31: 0})): false,
},
&tailcfg.MapResponse{
DERPMap: &tailcfg.DERPMap{
Regions: map[int]*tailcfg.DERPRegion{
1: &tailcfg.DERPRegion{
RegionID: 1,
RegionCode: "foo",
Nodes: []*tailcfg.DERPNode{
{
Name: "n1",
RegionID: 1,
HostName: "foo.com",
},
{
Name: "n2",
RegionID: 1,
HostName: "bar.com",
},
},
},
},
},
DNSConfig: &tailcfg.DNSConfig{
Resolvers: []dnstype.Resolver{
{Addr: "10.0.0.1"},
},
},
PacketFilter: []tailcfg.FilterRule{
{
SrcIPs: []string{"1.2.3.4"},
DstPorts: []tailcfg.NetPortRange{
{
IP: "1.2.3.4/32",
Ports: tailcfg.PortRange{First: 1, Last: 2},
},
},
},
},
Peers: []*tailcfg.Node{
{
ID: 1,
},
{
ID: 2,
},
},
UserProfiles: []tailcfg.UserProfile{
{ID: 1, LoginName: "[email protected]"},
{ID: 2, LoginName: "[email protected]"},
},
},
filter.Match{
IPProto: []ipproto.Proto{1, 2, 3},
},
}
}
var sink = Hash("foo")
func BenchmarkHash(b *testing.B) {
b.ReportAllocs()
v := getVal()
for i := 0; i < b.N; i++ {
sink = Hash(v)
}
}
func TestHashMapAcyclic(t *testing.T) {
m := map[int]string{}
for i := 0; i < 100; i++ {
m[i] = fmt.Sprint(i)
}
got := map[string]bool{}
var buf bytes.Buffer
bw := bufio.NewWriter(&buf)
for i := 0; i < 20; i++ {
v := reflect.ValueOf(m)
buf.Reset()
bw.Reset(&buf)
h := &hasher{bw: bw}
h.hashMap(v)
if got[string(buf.Bytes())] {
continue
}
got[string(buf.Bytes())] = true
}
if len(got) != 1 {
t.Errorf("got %d results; want 1", len(got))
}
}
func TestPrintArray(t *testing.T) {
type T struct {
X [32]byte
}
x := T{X: [32]byte{1: 1, 31: 31}}
var got bytes.Buffer
bw := bufio.NewWriter(&got)
h := &hasher{bw: bw}
h.hashValue(reflect.ValueOf(x))
bw.Flush()
const want = "\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1f"
if got := got.Bytes(); string(got) != want {
t.Errorf("wrong:\n got: %q\nwant: %q\n", got, want)
}
}
func BenchmarkHashMapAcyclic(b *testing.B) {
b.ReportAllocs()
m := map[int]string{}
for i := 0; i < 100; i++ {
m[i] = fmt.Sprint(i)
}
var buf bytes.Buffer
bw := bufio.NewWriter(&buf)
v := reflect.ValueOf(m)
h := &hasher{bw: bw}
for i := 0; i < b.N; i++ {
buf.Reset()
bw.Reset(&buf)
h.hashMap(v)
}
}
func BenchmarkTailcfgNode(b *testing.B) | {
b.ReportAllocs()
node := new(tailcfg.Node)
for i := 0; i < b.N; i++ {
sink = Hash(node)
}
} | identifier_body |
|
deephash_test.go | gotEq := Hash(tt.in[0]) == Hash(tt.in[1])
if gotEq != tt.wantEq {
t.Errorf("(Hash(%v) == Hash(%v)) = %v, want %v", tt.in[0], tt.in[1], gotEq, tt.wantEq)
}
}
}
func TestDeepHash(t *testing.T) {
// v contains the types of values we care about for our current callers.
// Mostly we're just testing that we don't panic on handled types.
v := getVal()
hash1 := Hash(v)
t.Logf("hash: %v", hash1)
for i := 0; i < 20; i++ {
hash2 := Hash(getVal())
if hash1 != hash2 {
t.Error("second hash didn't match")
}
}
}
func getVal() []interface{} {
return []interface{}{
&wgcfg.Config{
Name: "foo",
Addresses: []netaddr.IPPrefix{netaddr.IPPrefixFrom(netaddr.IPFrom16([16]byte{3: 3}), 5)},
Peers: []wgcfg.Peer{
{
PublicKey: key.NodePublic{},
},
},
},
&router.Config{
Routes: []netaddr.IPPrefix{
netaddr.MustParseIPPrefix("1.2.3.0/24"),
netaddr.MustParseIPPrefix("1234::/64"),
},
},
map[dnsname.FQDN][]netaddr.IP{
dnsname.FQDN("a."): {netaddr.MustParseIP("1.2.3.4"), netaddr.MustParseIP("4.3.2.1")},
dnsname.FQDN("b."): {netaddr.MustParseIP("8.8.8.8"), netaddr.MustParseIP("9.9.9.9")},
dnsname.FQDN("c."): {netaddr.MustParseIP("6.6.6.6"), netaddr.MustParseIP("7.7.7.7")},
dnsname.FQDN("d."): {netaddr.MustParseIP("6.7.6.6"), netaddr.MustParseIP("7.7.7.8")},
dnsname.FQDN("e."): {netaddr.MustParseIP("6.8.6.6"), netaddr.MustParseIP("7.7.7.9")},
dnsname.FQDN("f."): {netaddr.MustParseIP("6.9.6.6"), netaddr.MustParseIP("7.7.7.0")},
},
map[dnsname.FQDN][]netaddr.IPPort{
dnsname.FQDN("a."): {netaddr.MustParseIPPort("1.2.3.4:11"), netaddr.MustParseIPPort("4.3.2.1:22")},
dnsname.FQDN("b."): {netaddr.MustParseIPPort("8.8.8.8:11"), netaddr.MustParseIPPort("9.9.9.9:22")},
dnsname.FQDN("c."): {netaddr.MustParseIPPort("8.8.8.8:12"), netaddr.MustParseIPPort("9.9.9.9:23")},
dnsname.FQDN("d."): {netaddr.MustParseIPPort("8.8.8.8:13"), netaddr.MustParseIPPort("9.9.9.9:24")},
dnsname.FQDN("e."): {netaddr.MustParseIPPort("8.8.8.8:14"), netaddr.MustParseIPPort("9.9.9.9:25")},
},
map[key.DiscoPublic]bool{
key.DiscoPublicFromRaw32(mem.B([]byte{1: 1, 31: 0})): true,
key.DiscoPublicFromRaw32(mem.B([]byte{1: 2, 31: 0})): false,
key.DiscoPublicFromRaw32(mem.B([]byte{1: 3, 31: 0})): true,
key.DiscoPublicFromRaw32(mem.B([]byte{1: 4, 31: 0})): false,
},
&tailcfg.MapResponse{
DERPMap: &tailcfg.DERPMap{
Regions: map[int]*tailcfg.DERPRegion{
1: &tailcfg.DERPRegion{
RegionID: 1,
RegionCode: "foo",
Nodes: []*tailcfg.DERPNode{
{
Name: "n1",
RegionID: 1,
HostName: "foo.com",
},
{
Name: "n2",
RegionID: 1,
HostName: "bar.com",
},
},
},
},
},
DNSConfig: &tailcfg.DNSConfig{
Resolvers: []dnstype.Resolver{
{Addr: "10.0.0.1"},
},
},
PacketFilter: []tailcfg.FilterRule{
{
SrcIPs: []string{"1.2.3.4"},
DstPorts: []tailcfg.NetPortRange{
{
IP: "1.2.3.4/32",
Ports: tailcfg.PortRange{First: 1, Last: 2},
},
},
},
},
Peers: []*tailcfg.Node{
{
ID: 1,
},
{
ID: 2,
},
},
UserProfiles: []tailcfg.UserProfile{
{ID: 1, LoginName: "[email protected]"},
{ID: 2, LoginName: "[email protected]"},
},
},
filter.Match{
IPProto: []ipproto.Proto{1, 2, 3},
},
}
}
var sink = Hash("foo")
func BenchmarkHash(b *testing.B) {
b.ReportAllocs()
v := getVal()
for i := 0; i < b.N; i++ {
sink = Hash(v)
}
}
func TestHashMapAcyclic(t *testing.T) {
m := map[int]string{}
for i := 0; i < 100; i++ {
m[i] = fmt.Sprint(i)
}
got := map[string]bool{}
var buf bytes.Buffer
bw := bufio.NewWriter(&buf)
for i := 0; i < 20; i++ {
v := reflect.ValueOf(m)
buf.Reset()
bw.Reset(&buf)
h := &hasher{bw: bw}
h.hashMap(v)
if got[string(buf.Bytes())] {
continue
}
got[string(buf.Bytes())] = true
}
if len(got) != 1 {
t.Errorf("got %d results; want 1", len(got))
}
}
func TestPrintArray(t *testing.T) {
type T struct {
X [32]byte
}
x := T{X: [32]byte{1: 1, 31: 31}}
var got bytes.Buffer
bw := bufio.NewWriter(&got)
h := &hasher{bw: bw}
h.hashValue(reflect.ValueOf(x))
bw.Flush()
const want = "\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1f"
if got := got.Bytes(); string(got) != want {
t.Errorf("wrong:\n got: %q\nwant: %q\n", got, want)
}
}
func BenchmarkHashMapAcyclic(b *testing.B) {
b.ReportAllocs()
m := map[int]string{}
for i := 0; i < 100; i++ {
m[i] = fmt.Sprint(i)
}
var buf bytes.Buffer
bw := bufio.NewWriter(&buf)
v := reflect.ValueOf(m)
h := &hasher{bw: bw}
for i := 0; i < b.N; i++ {
buf.Reset()
bw.Reset(&buf)
h.hashMap(v)
}
}
func BenchmarkTailcfgNode(b *testing.B) {
b.ReportAllocs()
node := new(tailcfg.Node)
for i := 0; i < b.N; i++ {
sink = Hash(node)
}
}
func TestExhaustive(t *testing.T) {
seen := make(map[Sum]bool)
for i := 0; i < 100000; i++ {
s := Hash(i)
if seen[s] | {
t.Fatalf("hash collision %v", i)
} | conditional_block |
|
record_db.go | DB struct {
batchLock sync.Mutex
db *store.BadgerDB
}
type recordKey insolar.ID
func (k recordKey) Scope() store.Scope {
return store.ScopeRecord
}
func (k recordKey) DebugString() string {
id := insolar.ID(k)
return "recordKey. " + id.DebugString()
}
func (k recordKey) ID() []byte {
id := insolar.ID(k)
return id.AsBytes()
}
func newRecordKey(raw []byte) recordKey {
pulse := insolar.NewPulseNumber(raw)
hash := raw[pulse.Size():]
return recordKey(*insolar.NewID(pulse, hash))
}
const (
recordPositionKeyPrefix = 0x01
lastKnownRecordPositionKeyPrefix = 0x02
)
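// recordPositionKey maps (pulse, sequence number) to a record ID,
// while lastKnownRecordPositionKey keeps the highest sequence number stored for each pulse.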
type recordPositionKey struct {
pn insolar.PulseNumber
number uint32
}
func newRecordPositionKey(pn insolar.PulseNumber, number uint32) recordPositionKey {
return recordPositionKey{pn: pn, number: number}
}
func (k recordPositionKey) Scope() store.Scope {
return store.ScopeRecordPosition
}
func (k recordPositionKey) ID() []byte {
parsedNum := make([]byte, 4)
binary.BigEndian.PutUint32(parsedNum, k.number)
return bytes.Join([][]byte{{recordPositionKeyPrefix}, k.pn.Bytes(), parsedNum}, nil)
}
func newRecordPositionKeyFromBytes(raw []byte) recordPositionKey {
k := recordPositionKey{}
k.pn = insolar.NewPulseNumber(raw[1:])
k.number = binary.BigEndian.Uint32(raw[(k.pn.Size() + 1):])
return k
}
func (k recordPositionKey) String() string {
return fmt.Sprintf("recordPositionKey. pulse: %d, number: %d", k.pn, k.number)
}
type lastKnownRecordPositionKey struct {
pn insolar.PulseNumber
}
func newLastKnownRecordPositionKey(raw []byte) lastKnownRecordPositionKey {
k := lastKnownRecordPositionKey{}
k.pn = insolar.NewPulseNumber(raw[1:])
return k
}
func (k lastKnownRecordPositionKey) String() string {
return fmt.Sprintf("lastKnownRecordPositionKey. pulse: %d", k.pn)
}
func (k lastKnownRecordPositionKey) Scope() store.Scope {
return store.ScopeRecordPosition
}
func (k lastKnownRecordPositionKey) ID() []byte {
return bytes.Join([][]byte{{lastKnownRecordPositionKeyPrefix}, k.pn.Bytes()}, nil)
}
// NewRecordDB creates new DB storage instance.
func NewRecordDB(db *store.BadgerDB) *RecordDB {
return &RecordDB{db: db}
}
// Set saves new record-value in storage.
func (r *RecordDB) Set(ctx context.Context, rec record.Material) error {
return r.BatchSet(ctx, []record.Material{rec})
}
// BatchSet saves a batch of records to storage with order-processing.
func (r *RecordDB) BatchSet(ctx context.Context, recs []record.Material) error {
if len(recs) == 0 {
return nil
}
r.batchLock.Lock()
defer r.batchLock.Unlock()
// The batch may contain records from different pulses,
// so we track the current pulse and position as we go.
// For each new pulse the position is fetched from the db.
// We could look the position up on every iteration, but we SHOULDN'T,
// because that isn't efficient.
lastKnowPulse := insolar.PulseNumber(0)
position := uint32(0)
err := r.db.Backend().Update(func(txn *badger.Txn) error {
for _, rec := range recs {
if rec.ID.IsEmpty() {
return errors.New("id is empty")
}
err := setRecord(txn, recordKey(rec.ID), rec)
if err != nil {
return err
}
// For cross-pulse batches
if lastKnowPulse != rec.ID.Pulse() {
// Set last known before changing pulse/position
err := setLastKnownPosition(txn, lastKnowPulse, position)
if err != nil {
return err | if err != nil && err != ErrNotFound {
return err
}
lastKnowPulse = rec.ID.Pulse()
}
position++
err = setPosition(txn, rec.ID, position)
if err != nil {
return err
}
}
// set position for last record
err := setLastKnownPosition(txn, lastKnowPulse, position)
if err != nil {
return err
}
return nil
})
if err != nil {
return err
}
return nil
}
// setRecord is a helper method for storing a record to db in scope of txn.
func setRecord(txn *badger.Txn, key store.Key, record record.Material) error {
data, err := record.Marshal()
if err != nil {
return err
}
fullKey := append(key.Scope().Bytes(), key.ID()...)
_, err = txn.Get(fullKey)
if err != nil && err != badger.ErrKeyNotFound {
return err
}
if err == nil {
return ErrOverride
}
return txn.Set(fullKey, data)
}
// getLastKnownPosition is a helper method for getting the last known record position for a pulse in scope of txn.
func getLastKnownPosition(txn *badger.Txn, pn insolar.PulseNumber) (uint32, error) {
key := lastKnownRecordPositionKey{pn: pn}
fullKey := append(key.Scope().Bytes(), key.ID()...)
item, err := txn.Get(fullKey)
if err != nil {
if err == badger.ErrKeyNotFound {
return 0, ErrNotFound
}
return 0, err
}
buff, err := item.ValueCopy(nil)
if err != nil {
return 0, err
}
return binary.BigEndian.Uint32(buff), nil
}
// setLastKnownPosition is a helper method for saving the last known record position for a pulse in scope of txn.
func setLastKnownPosition(txn *badger.Txn, pn insolar.PulseNumber, position uint32) error {
lastPositionKey := lastKnownRecordPositionKey{pn: pn}
parsedPosition := make([]byte, 4)
binary.BigEndian.PutUint32(parsedPosition, position)
fullKey := append(lastPositionKey.Scope().Bytes(), lastPositionKey.ID()...)
return txn.Set(fullKey, parsedPosition)
}
func setPosition(txn *badger.Txn, recID insolar.ID, position uint32) error {
positionKey := newRecordPositionKey(recID.Pulse(), position)
fullKey := append(positionKey.Scope().Bytes(), positionKey.ID()...)
return txn.Set(fullKey, recID.Bytes())
}
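// truncateRecordsHead deletes every stored record whose pulse number is >= from.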
func (r *RecordDB) truncateRecordsHead(ctx context.Context, from insolar.PulseNumber) error {
keyFrom := recordKey(*insolar.NewID(from, nil))
it := store.NewReadIterator(r.db.Backend(), keyFrom, false)
defer it.Close()
var hasKeys bool
for it.Next() {
hasKeys = true
key := newRecordKey(it.Key())
err := r.db.Delete(key)
if err != nil {
return errors.Wrapf(err, "can't delete key: %s", key.DebugString())
}
inslogger.FromContext(ctx).Debugf("Erased key: %s", key.DebugString())
}
if !hasKeys {
inslogger.FromContext(ctx).Infof("No records. Nothing done. Start key: %s", from.String())
}
return nil
}
func (r *RecordDB) truncatePositionRecordHead(ctx context.Context, from store.Key, prefix byte) error {
it := store.NewReadIterator(r.db.Backend(), from, false)
defer it.Close()
var hasKeys bool
for it.Next() {
hasKeys = true
if it.Key()[0] != prefix {
continue
}
key := makePositionKey(it.Key())
err := r.db.Delete(key)
if err != nil {
return errors.Wrapf(err, "can't delete key: %s", key)
}
inslogger.FromContext(ctx).Debugf("Erased key: %s", key)
}
if !hasKeys {
inslogger.FromContext(ctx).Infof("No records. Nothing done. Start key: %s", from)
}
return nil
}
func makePositionKey(raw []byte) store.Key {
switch raw[0] {
case recordPositionKeyPrefix:
return newRecordPositionKeyFromBytes(raw)
case lastKnownRecordPositionKeyPrefix:
return newLastKnownRecordPositionKey(raw)
default:
panic("unknown prefix: " + string(raw[0]))
}
}
// TruncateHead removes all records starting with the given pulse.
func (r *RecordDB) TruncateHead(ctx context.Context, from insolar.PulseNumber) error {
if err := r.truncateRecordsHead(ctx, from); err != nil {
return errors.Wrap(err, "failed to truncate records head")
}
if err := r.truncatePositionRecordHead(ctx, recordPositionKey{pn: from}, recordPositionKeyPrefix); err != nil {
return errors.Wrap(err, "failed to truncate | }
// fetch position for a new pulse
position, err = getLastKnownPosition(txn, rec.ID.Pulse()) | random_line_split |
record_db.go | struct {
batchLock sync.Mutex
db *store.BadgerDB
}
type recordKey insolar.ID
func (k recordKey) Scope() store.Scope {
return store.ScopeRecord
}
func (k recordKey) DebugString() string {
id := insolar.ID(k)
return "recordKey. " + id.DebugString()
}
func (k recordKey) ID() []byte {
id := insolar.ID(k)
return id.AsBytes()
}
func newRecordKey(raw []byte) recordKey {
pulse := insolar.NewPulseNumber(raw)
hash := raw[pulse.Size():]
return recordKey(*insolar.NewID(pulse, hash))
}
const (
recordPositionKeyPrefix = 0x01
lastKnownRecordPositionKeyPrefix = 0x02
)
type recordPositionKey struct {
pn insolar.PulseNumber
number uint32
}
func newRecordPositionKey(pn insolar.PulseNumber, number uint32) recordPositionKey {
return recordPositionKey{pn: pn, number: number}
}
func (k recordPositionKey) Scope() store.Scope {
return store.ScopeRecordPosition
}
func (k recordPositionKey) ID() []byte {
parsedNum := make([]byte, 4)
binary.BigEndian.PutUint32(parsedNum, k.number)
return bytes.Join([][]byte{{recordPositionKeyPrefix}, k.pn.Bytes(), parsedNum}, nil)
}
func newRecordPositionKeyFromBytes(raw []byte) recordPositionKey {
k := recordPositionKey{}
k.pn = insolar.NewPulseNumber(raw[1:])
k.number = binary.BigEndian.Uint32(raw[(k.pn.Size() + 1):])
return k
}
func (k recordPositionKey) String() string {
return fmt.Sprintf("recordPositionKey. pulse: %d, number: %d", k.pn, k.number)
}
type lastKnownRecordPositionKey struct {
pn insolar.PulseNumber
}
func newLastKnownRecordPositionKey(raw []byte) lastKnownRecordPositionKey {
k := lastKnownRecordPositionKey{}
k.pn = insolar.NewPulseNumber(raw[1:])
return k
}
func (k lastKnownRecordPositionKey) String() string |
func (k lastKnownRecordPositionKey) Scope() store.Scope {
return store.ScopeRecordPosition
}
func (k lastKnownRecordPositionKey) ID() []byte {
return bytes.Join([][]byte{{lastKnownRecordPositionKeyPrefix}, k.pn.Bytes()}, nil)
}
// NewRecordDB creates new DB storage instance.
func NewRecordDB(db *store.BadgerDB) *RecordDB {
return &RecordDB{db: db}
}
// Set saves new record-value in storage.
func (r *RecordDB) Set(ctx context.Context, rec record.Material) error {
return r.BatchSet(ctx, []record.Material{rec})
}
// BatchSet saves a batch of records to storage with order-processing.
func (r *RecordDB) BatchSet(ctx context.Context, recs []record.Material) error {
if len(recs) == 0 {
return nil
}
r.batchLock.Lock()
defer r.batchLock.Unlock()
// The batch may contain records from different pulses,
// so we track the current pulse and position as we go.
// For each new pulse the position is fetched from the db.
// We could look the position up on every iteration, but we SHOULDN'T,
// because that isn't efficient.
lastKnowPulse := insolar.PulseNumber(0)
position := uint32(0)
err := r.db.Backend().Update(func(txn *badger.Txn) error {
for _, rec := range recs {
if rec.ID.IsEmpty() {
return errors.New("id is empty")
}
err := setRecord(txn, recordKey(rec.ID), rec)
if err != nil {
return err
}
// For cross-pulse batches
if lastKnowPulse != rec.ID.Pulse() {
// Set last known before changing pulse/position
err := setLastKnownPosition(txn, lastKnowPulse, position)
if err != nil {
return err
}
// fetch position for a new pulse
position, err = getLastKnownPosition(txn, rec.ID.Pulse())
if err != nil && err != ErrNotFound {
return err
}
lastKnowPulse = rec.ID.Pulse()
}
position++
err = setPosition(txn, rec.ID, position)
if err != nil {
return err
}
}
// set position for last record
err := setLastKnownPosition(txn, lastKnowPulse, position)
if err != nil {
return err
}
return nil
})
if err != nil {
return err
}
return nil
}
// setRecord is a helper method for storing a record to db in scope of txn.
func setRecord(txn *badger.Txn, key store.Key, record record.Material) error {
data, err := record.Marshal()
if err != nil {
return err
}
fullKey := append(key.Scope().Bytes(), key.ID()...)
_, err = txn.Get(fullKey)
if err != nil && err != badger.ErrKeyNotFound {
return err
}
if err == nil {
return ErrOverride
}
return txn.Set(fullKey, data)
}
// getLastKnownPosition is a helper method for getting the last known record position for a pulse in scope of txn.
func getLastKnownPosition(txn *badger.Txn, pn insolar.PulseNumber) (uint32, error) {
key := lastKnownRecordPositionKey{pn: pn}
fullKey := append(key.Scope().Bytes(), key.ID()...)
item, err := txn.Get(fullKey)
if err != nil {
if err == badger.ErrKeyNotFound {
return 0, ErrNotFound
}
return 0, err
}
buff, err := item.ValueCopy(nil)
if err != nil {
return 0, err
}
return binary.BigEndian.Uint32(buff), nil
}
// setLastKnownPosition is a helper method for saving the last known record position for a pulse in scope of txn.
func setLastKnownPosition(txn *badger.Txn, pn insolar.PulseNumber, position uint32) error {
lastPositionKey := lastKnownRecordPositionKey{pn: pn}
parsedPosition := make([]byte, 4)
binary.BigEndian.PutUint32(parsedPosition, position)
fullKey := append(lastPositionKey.Scope().Bytes(), lastPositionKey.ID()...)
return txn.Set(fullKey, parsedPosition)
}
func setPosition(txn *badger.Txn, recID insolar.ID, position uint32) error {
positionKey := newRecordPositionKey(recID.Pulse(), position)
fullKey := append(positionKey.Scope().Bytes(), positionKey.ID()...)
return txn.Set(fullKey, recID.Bytes())
}
func (r *RecordDB) truncateRecordsHead(ctx context.Context, from insolar.PulseNumber) error {
keyFrom := recordKey(*insolar.NewID(from, nil))
it := store.NewReadIterator(r.db.Backend(), keyFrom, false)
defer it.Close()
var hasKeys bool
for it.Next() {
hasKeys = true
key := newRecordKey(it.Key())
err := r.db.Delete(key)
if err != nil {
return errors.Wrapf(err, "can't delete key: %s", key.DebugString())
}
inslogger.FromContext(ctx).Debugf("Erased key: %s", key.DebugString())
}
if !hasKeys {
inslogger.FromContext(ctx).Infof("No records. Nothing done. Start key: %s", from.String())
}
return nil
}
func (r *RecordDB) truncatePositionRecordHead(ctx context.Context, from store.Key, prefix byte) error {
it := store.NewReadIterator(r.db.Backend(), from, false)
defer it.Close()
var hasKeys bool
for it.Next() {
hasKeys = true
if it.Key()[0] != prefix {
continue
}
key := makePositionKey(it.Key())
err := r.db.Delete(key)
if err != nil {
return errors.Wrapf(err, "can't delete key: %s", key)
}
inslogger.FromContext(ctx).Debugf("Erased key: %s", key)
}
if !hasKeys {
inslogger.FromContext(ctx).Infof("No records. Nothing done. Start key: %s", from)
}
return nil
}
func makePositionKey(raw []byte) store.Key {
switch raw[0] {
case recordPositionKeyPrefix:
return newRecordPositionKeyFromBytes(raw)
case lastKnownRecordPositionKeyPrefix:
return newLastKnownRecordPositionKey(raw)
default:
panic("unknown prefix: " + string(raw[0]))
}
}
// TruncateHead removes all records starting with the given pulse.
func (r *RecordDB) TruncateHead(ctx context.Context, from insolar.PulseNumber) error {
if err := r.truncateRecordsHead(ctx, from); err != nil {
return errors.Wrap(err, "failed to truncate records head")
}
if err := r.truncatePositionRecordHead(ctx, recordPositionKey{pn: from}, recordPositionKeyPrefix); err != nil {
return errors.Wrap(err, "failed | {
return fmt.Sprintf("lastKnownRecordPositionKey. pulse: %d", k.pn)
} | identifier_body |
record_db.go | Prefix}, k.pn.Bytes(), parsedNum}, nil)
}
func newRecordPositionKeyFromBytes(raw []byte) recordPositionKey {
k := recordPositionKey{}
k.pn = insolar.NewPulseNumber(raw[1:])
k.number = binary.BigEndian.Uint32(raw[(k.pn.Size() + 1):])
return k
}
func (k recordPositionKey) String() string {
return fmt.Sprintf("recordPositionKey. pulse: %d, number: %d", k.pn, k.number)
}
type lastKnownRecordPositionKey struct {
pn insolar.PulseNumber
}
func newLastKnownRecordPositionKey(raw []byte) lastKnownRecordPositionKey {
k := lastKnownRecordPositionKey{}
k.pn = insolar.NewPulseNumber(raw[1:])
return k
}
func (k lastKnownRecordPositionKey) String() string {
return fmt.Sprintf("lastKnownRecordPositionKey. pulse: %d", k.pn)
}
func (k lastKnownRecordPositionKey) Scope() store.Scope {
return store.ScopeRecordPosition
}
func (k lastKnownRecordPositionKey) ID() []byte {
return bytes.Join([][]byte{{lastKnownRecordPositionKeyPrefix}, k.pn.Bytes()}, nil)
}
// NewRecordDB creates new DB storage instance.
func NewRecordDB(db *store.BadgerDB) *RecordDB {
return &RecordDB{db: db}
}
// Set saves new record-value in storage.
func (r *RecordDB) Set(ctx context.Context, rec record.Material) error {
return r.BatchSet(ctx, []record.Material{rec})
}
// BatchSet saves a batch of records to storage with order-processing.
func (r *RecordDB) BatchSet(ctx context.Context, recs []record.Material) error {
if len(recs) == 0 {
return nil
}
r.batchLock.Lock()
defer r.batchLock.Unlock()
// The batch may contain records from different pulses,
// so we track the current pulse and position as we go.
// For each new pulse the position is fetched from the db.
// We could look the position up on every iteration, but we SHOULDN'T,
// because that isn't efficient.
lastKnowPulse := insolar.PulseNumber(0)
position := uint32(0)
err := r.db.Backend().Update(func(txn *badger.Txn) error {
for _, rec := range recs {
if rec.ID.IsEmpty() {
return errors.New("id is empty")
}
err := setRecord(txn, recordKey(rec.ID), rec)
if err != nil {
return err
}
// For cross-pulse batches
if lastKnowPulse != rec.ID.Pulse() {
// Set last known before changing pulse/position
err := setLastKnownPosition(txn, lastKnowPulse, position)
if err != nil {
return err
}
// fetch position for a new pulse
position, err = getLastKnownPosition(txn, rec.ID.Pulse())
if err != nil && err != ErrNotFound {
return err
}
lastKnowPulse = rec.ID.Pulse()
}
position++
err = setPosition(txn, rec.ID, position)
if err != nil {
return err
}
}
// set position for last record
err := setLastKnownPosition(txn, lastKnowPulse, position)
if err != nil {
return err
}
return nil
})
if err != nil {
return err
}
return nil
}
// setRecord is a helper method for storing a record to db in scope of txn.
func setRecord(txn *badger.Txn, key store.Key, record record.Material) error {
data, err := record.Marshal()
if err != nil {
return err
}
fullKey := append(key.Scope().Bytes(), key.ID()...)
_, err = txn.Get(fullKey)
if err != nil && err != badger.ErrKeyNotFound {
return err
}
if err == nil {
return ErrOverride
}
return txn.Set(fullKey, data)
}
// getLastKnownPosition is a helper method for getting the last known record position for a pulse in scope of txn.
func getLastKnownPosition(txn *badger.Txn, pn insolar.PulseNumber) (uint32, error) {
key := lastKnownRecordPositionKey{pn: pn}
fullKey := append(key.Scope().Bytes(), key.ID()...)
item, err := txn.Get(fullKey)
if err != nil {
if err == badger.ErrKeyNotFound {
return 0, ErrNotFound
}
return 0, err
}
buff, err := item.ValueCopy(nil)
if err != nil {
return 0, err
}
return binary.BigEndian.Uint32(buff), nil
}
// setLastKnownPosition is a helper method for saving the last known record position for a pulse in scope of txn.
func setLastKnownPosition(txn *badger.Txn, pn insolar.PulseNumber, position uint32) error {
lastPositionKey := lastKnownRecordPositionKey{pn: pn}
parsedPosition := make([]byte, 4)
binary.BigEndian.PutUint32(parsedPosition, position)
fullKey := append(lastPositionKey.Scope().Bytes(), lastPositionKey.ID()...)
return txn.Set(fullKey, parsedPosition)
}
func setPosition(txn *badger.Txn, recID insolar.ID, position uint32) error {
positionKey := newRecordPositionKey(recID.Pulse(), position)
fullKey := append(positionKey.Scope().Bytes(), positionKey.ID()...)
return txn.Set(fullKey, recID.Bytes())
}
func (r *RecordDB) truncateRecordsHead(ctx context.Context, from insolar.PulseNumber) error {
keyFrom := recordKey(*insolar.NewID(from, nil))
it := store.NewReadIterator(r.db.Backend(), keyFrom, false)
defer it.Close()
var hasKeys bool
for it.Next() {
hasKeys = true
key := newRecordKey(it.Key())
err := r.db.Delete(key)
if err != nil {
return errors.Wrapf(err, "can't delete key: %s", key.DebugString())
}
inslogger.FromContext(ctx).Debugf("Erased key: %s", key.DebugString())
}
if !hasKeys {
inslogger.FromContext(ctx).Infof("No records. Nothing done. Start key: %s", from.String())
}
return nil
}
func (r *RecordDB) truncatePositionRecordHead(ctx context.Context, from store.Key, prefix byte) error {
it := store.NewReadIterator(r.db.Backend(), from, false)
defer it.Close()
var hasKeys bool
for it.Next() {
hasKeys = true
if it.Key()[0] != prefix {
continue
}
key := makePositionKey(it.Key())
err := r.db.Delete(key)
if err != nil {
return errors.Wrapf(err, "can't delete key: %s", key)
}
inslogger.FromContext(ctx).Debugf("Erased key: %s", key)
}
if !hasKeys {
inslogger.FromContext(ctx).Infof("No records. Nothing done. Start key: %s", from)
}
return nil
}
func makePositionKey(raw []byte) store.Key {
switch raw[0] {
case recordPositionKeyPrefix:
return newRecordPositionKeyFromBytes(raw)
case lastKnownRecordPositionKeyPrefix:
return newLastKnownRecordPositionKey(raw)
default:
panic("unknown prefix: " + string(raw[0]))
}
}
// TruncateHead removes all records starting from the provided pulse.
func (r *RecordDB) TruncateHead(ctx context.Context, from insolar.PulseNumber) error {
if err := r.truncateRecordsHead(ctx, from); err != nil {
return errors.Wrap(err, "failed to truncate records head")
}
if err := r.truncatePositionRecordHead(ctx, recordPositionKey{pn: from}, recordPositionKeyPrefix); err != nil {
return errors.Wrap(err, "failed to truncate record positions head")
}
if err := r.truncatePositionRecordHead(ctx, lastKnownRecordPositionKey{pn: from}, lastKnownRecordPositionKeyPrefix); err != nil {
return errors.Wrap(err, "failed to truncate last known record positions head")
}
return nil
}
// ForID returns record for provided id.
func (r *RecordDB) ForID(ctx context.Context, id insolar.ID) (record.Material, error) {
return r.get(id)
}
// get loads record.Material from DB
func (r *RecordDB) get(id insolar.ID) (record.Material, error) {
var buff []byte
var err error
err = r.db.Backend().View(func(txn *badger.Txn) error {
key := recordKey(id)
fullKey := append(key.Scope().Bytes(), key.ID()...)
item, err := txn.Get(fullKey)
if err != nil {
if err == badger.ErrKeyNotFound {
return ErrNotFound
}
return err
}
buff, err = item.ValueCopy(nil)
return err
})
if err != nil {
return record.Material{}, err
}
rec := record.Material{}
err = rec.Unmarshal(buff)
return rec, err
}
// LastKnownPosition returns last known position of record in Pulse.
func (r *RecordDB) | LastKnownPosition | identifier_name |
|
record_db.go | struct {
batchLock sync.Mutex
db *store.BadgerDB
}
type recordKey insolar.ID
func (k recordKey) Scope() store.Scope {
return store.ScopeRecord
}
func (k recordKey) DebugString() string {
id := insolar.ID(k)
return "recordKey. " + id.DebugString()
}
func (k recordKey) ID() []byte {
id := insolar.ID(k)
return id.AsBytes()
}
func newRecordKey(raw []byte) recordKey {
pulse := insolar.NewPulseNumber(raw)
hash := raw[pulse.Size():]
return recordKey(*insolar.NewID(pulse, hash))
}
const (
recordPositionKeyPrefix = 0x01
lastKnownRecordPositionKeyPrefix = 0x02
)
type recordPositionKey struct {
pn insolar.PulseNumber
number uint32
}
func newRecordPositionKey(pn insolar.PulseNumber, number uint32) recordPositionKey {
return recordPositionKey{pn: pn, number: number}
}
func (k recordPositionKey) Scope() store.Scope {
return store.ScopeRecordPosition
}
func (k recordPositionKey) ID() []byte {
parsedNum := make([]byte, 4)
binary.BigEndian.PutUint32(parsedNum, k.number)
return bytes.Join([][]byte{{recordPositionKeyPrefix}, k.pn.Bytes(), parsedNum}, nil)
}
func newRecordPositionKeyFromBytes(raw []byte) recordPositionKey {
k := recordPositionKey{}
k.pn = insolar.NewPulseNumber(raw[1:])
k.number = binary.BigEndian.Uint32(raw[(k.pn.Size() + 1):])
return k
}
func (k recordPositionKey) String() string {
return fmt.Sprintf("recordPositionKey. pulse: %d, number: %d", k.pn, k.number)
}
type lastKnownRecordPositionKey struct {
pn insolar.PulseNumber
}
func newLastKnownRecordPositionKey(raw []byte) lastKnownRecordPositionKey {
k := lastKnownRecordPositionKey{}
k.pn = insolar.NewPulseNumber(raw[1:])
return k
}
func (k lastKnownRecordPositionKey) String() string {
return fmt.Sprintf("lastKnownRecordPositionKey. pulse: %d", k.pn)
}
func (k lastKnownRecordPositionKey) Scope() store.Scope {
return store.ScopeRecordPosition
}
func (k lastKnownRecordPositionKey) ID() []byte {
return bytes.Join([][]byte{{lastKnownRecordPositionKeyPrefix}, k.pn.Bytes()}, nil)
}
// NewRecordDB creates new DB storage instance.
func NewRecordDB(db *store.BadgerDB) *RecordDB {
return &RecordDB{db: db}
}
// Set saves new record-value in storage.
func (r *RecordDB) Set(ctx context.Context, rec record.Material) error {
return r.BatchSet(ctx, []record.Material{rec})
}
// BatchSet saves a batch of records to storage with order-processing.
func (r *RecordDB) BatchSet(ctx context.Context, recs []record.Material) error {
if len(recs) == 0 {
return nil
}
r.batchLock.Lock()
defer r.batchLock.Unlock()
// It's possible that the batch contains records from different pulses,
// so we need to track the current pulse and position.
// When the pulse changes, the position is requested from the db.
// We could fetch the position on every iteration, but we shouldn't,
// because that isn't efficient.
lastKnowPulse := insolar.PulseNumber(0)
position := uint32(0)
err := r.db.Backend().Update(func(txn *badger.Txn) error {
for _, rec := range recs {
if rec.ID.IsEmpty() {
return errors.New("id is empty")
}
err := setRecord(txn, recordKey(rec.ID), rec)
if err != nil {
return err
}
// For cross-pulse batches
if lastKnowPulse != rec.ID.Pulse() {
// Set last known before changing pulse/position
err := setLastKnownPosition(txn, lastKnowPulse, position)
if err != nil {
return err
}
// fetch position for a new pulse
position, err = getLastKnownPosition(txn, rec.ID.Pulse())
if err != nil && err != ErrNotFound {
return err
}
lastKnowPulse = rec.ID.Pulse()
}
position++
err = setPosition(txn, rec.ID, position)
if err != nil |
}
// set position for last record
err := setLastKnownPosition(txn, lastKnowPulse, position)
if err != nil {
return err
}
return nil
})
if err != nil {
return err
}
return nil
}
// setRecord is a helper method for storing a record to db in scope of txn.
func setRecord(txn *badger.Txn, key store.Key, record record.Material) error {
data, err := record.Marshal()
if err != nil {
return err
}
fullKey := append(key.Scope().Bytes(), key.ID()...)
_, err = txn.Get(fullKey)
if err != nil && err != badger.ErrKeyNotFound {
return err
}
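// A nil error from Get means a record already exists under this key, so refuse to overwrite it.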
if err == nil {
return ErrOverride
}
return txn.Set(fullKey, data)
}
// getLastKnownPosition is a helper method for getting the last known position of a record from db in scope of txn and pulse.
func getLastKnownPosition(txn *badger.Txn, pn insolar.PulseNumber) (uint32, error) {
key := lastKnownRecordPositionKey{pn: pn}
fullKey := append(key.Scope().Bytes(), key.ID()...)
item, err := txn.Get(fullKey)
if err != nil {
if err == badger.ErrKeyNotFound {
return 0, ErrNotFound
}
return 0, err
}
buff, err := item.ValueCopy(nil)
if err != nil {
return 0, err
}
return binary.BigEndian.Uint32(buff), nil
}
// setLastKnownPosition is a helper method for setting the last known position of a record in db in scope of txn and pulse.
func setLastKnownPosition(txn *badger.Txn, pn insolar.PulseNumber, position uint32) error {
lastPositionKey := lastKnownRecordPositionKey{pn: pn}
parsedPosition := make([]byte, 4)
binary.BigEndian.PutUint32(parsedPosition, position)
fullKey := append(lastPositionKey.Scope().Bytes(), lastPositionKey.ID()...)
return txn.Set(fullKey, parsedPosition)
}
func setPosition(txn *badger.Txn, recID insolar.ID, position uint32) error {
positionKey := newRecordPositionKey(recID.Pulse(), position)
fullKey := append(positionKey.Scope().Bytes(), positionKey.ID()...)
return txn.Set(fullKey, recID.Bytes())
}
func (r *RecordDB) truncateRecordsHead(ctx context.Context, from insolar.PulseNumber) error {
keyFrom := recordKey(*insolar.NewID(from, nil))
it := store.NewReadIterator(r.db.Backend(), keyFrom, false)
defer it.Close()
var hasKeys bool
for it.Next() {
hasKeys = true
key := newRecordKey(it.Key())
err := r.db.Delete(key)
if err != nil {
return errors.Wrapf(err, "can't delete key: %s", key.DebugString())
}
inslogger.FromContext(ctx).Debugf("Erased key: %s", key.DebugString())
}
if !hasKeys {
inslogger.FromContext(ctx).Infof("No records. Nothing done. Start key: %s", from.String())
}
return nil
}
func (r *RecordDB) truncatePositionRecordHead(ctx context.Context, from store.Key, prefix byte) error {
it := store.NewReadIterator(r.db.Backend(), from, false)
defer it.Close()
var hasKeys bool
for it.Next() {
hasKeys = true
if it.Key()[0] != prefix {
continue
}
key := makePositionKey(it.Key())
err := r.db.Delete(key)
if err != nil {
return errors.Wrapf(err, "can't delete key: %s", key)
}
inslogger.FromContext(ctx).Debugf("Erased key: %s", key)
}
if !hasKeys {
inslogger.FromContext(ctx).Infof("No records. Nothing done. Start key: %s", from)
}
return nil
}
func makePositionKey(raw []byte) store.Key {
switch raw[0] {
case recordPositionKeyPrefix:
return newRecordPositionKeyFromBytes(raw)
case lastKnownRecordPositionKeyPrefix:
return newLastKnownRecordPositionKey(raw)
default:
panic("unknown prefix: " + string(raw[0]))
}
}
// TruncateHead removes all records starting from the provided pulse.
func (r *RecordDB) TruncateHead(ctx context.Context, from insolar.PulseNumber) error {
if err := r.truncateRecordsHead(ctx, from); err != nil {
return errors.Wrap(err, "failed to truncate records head")
}
if err := r.truncatePositionRecordHead(ctx, recordPositionKey{pn: from}, recordPositionKeyPrefix); err != nil {
return errors.Wrap(err, "failed | {
return err
} | conditional_block |
queueing_honey_badger.rs | <SQ, A>,
id: NodeId,
num_txs: usize,
mut rng: &mut TestRng,
) where
A: Adversary<SQ>,
{
for tx in (num_txs / 2)..num_txs {
let _ = net.send_input(id, Input::User(tx), &mut rng);
}
}
/// Proposes `num_txs` values and expects nodes to output and order them.
fn test_queueing_honey_badger<A>(mut net: VirtualNet<SQ, A>, num_txs: usize, mut rng: &mut TestRng)
where
A: Adversary<SQ>,
{
// Make two copies of all public keys.
let pub_keys_add = net
.correct_nodes()
.nth(0)
.expect("At least one correct node needs to exist")
.algorithm()
.algo()
.dyn_hb()
.public_keys()
.clone();
let mut pub_keys_rm = pub_keys_add.clone();
// Get the first correct node id as candidate for removal/re-adding.
let first_correct_node = *net.correct_nodes().nth(0).unwrap().id();
// Remove the first correct node from the new key set; it is the candidate to be removed from the network.
Arc::make_mut(&mut pub_keys_rm).remove(&first_correct_node);
// Broadcast public keys of all nodes except for the node to be removed.
let _ = net.broadcast_input(
&Input::Change(Change::NodeChange(pub_keys_rm.clone())),
&mut rng,
);
// Broadcast the first half of the transactions.
for tx in 0..(num_txs / 2) {
let _ = net.broadcast_input(&Input::User(tx), &mut rng);
}
// Closure for checking the output of a node for ChangeSet completion containing
// all nodes but the removed node.
let has_remove = |node: &Node<SQ>| {
node.outputs().iter().any(|batch| match batch.change() {
ChangeState::Complete(Change::NodeChange(pub_keys)) => pub_keys == &pub_keys_rm,
_ => false,
})
};
// Closure for checking the output of a node for ChangeSet completion containing
// all nodes, including the previously removed node.
let has_add = |node: &Node<SQ>| {
node.outputs().iter().any(|batch| match batch.change() {
ChangeState::Complete(Change::NodeChange(pub_keys)) => pub_keys == &pub_keys_add,
_ => false,
})
};
// Returns `true` if the node has not output all changes or transactions yet.
let node_busy = |node: &Node<SQ>| {
!has_remove(node) || !has_add(node) || !node.algorithm().algo().queue().is_empty()
};
// All nodes await removal.
let mut awaiting_removal: BTreeSet<_> = net.correct_nodes().map(|node| *node.id()).collect();
// All nodes but the removed node await addition.
let mut awaiting_addition: BTreeSet<_> = net
.correct_nodes()
.map(|node| *node.id())
.filter(|id| *id != first_correct_node)
.collect();
// All, including the previously removed node, await the second half of transactions.
let mut awaiting_second_half: BTreeSet<_> = awaiting_removal.clone();
// Whether the first correct node was rejoined as a validator.
let mut rejoined_first_correct = false;
// The removed first correct node which is to be restarted as soon as all remaining
// validators agreed to add it back.
let mut saved_first_correct: Option<Node<SQ>> = None;
// Handle messages in random order until all nodes have output all transactions.
while net.correct_nodes().any(node_busy) {
let stepped_id = net.crank_expect(&mut rng).0;
if awaiting_removal.contains(&stepped_id) && has_remove(&net.get(stepped_id).unwrap()) {
awaiting_removal.remove(&stepped_id);
info!(
"{:?} has finished waiting for node removal; still waiting: {:?}",
stepped_id, awaiting_removal
);
if awaiting_removal.is_empty() {
info!("Removing first correct node from the test network");
saved_first_correct = net.remove_node(&first_correct_node);
}
// Vote to add the first correct node back.
if stepped_id != first_correct_node {
let _ = net.send_input(
stepped_id,
Input::Change(Change::NodeChange(pub_keys_add.clone())),
rng,
);
info!(
"Input the vote to add the first correct node into {:?} with netinfo {:?}",
stepped_id,
net.get(stepped_id).unwrap().algorithm().algo().netinfo()
);
}
}
if awaiting_removal.is_empty() && awaiting_addition.contains(&stepped_id) {
// If the stepped node started voting to add the first correct node back,
// take a note of that and rejoin it.
if let Some(join_plan) =
net.get(stepped_id)
.unwrap()
.outputs()
.iter()
.find_map(|batch| match batch.change() {
ChangeState::InProgress(Change::NodeChange(pub_keys))
if pub_keys == &pub_keys_add =>
{
batch.join_plan()
}
_ => None,
})
{
awaiting_addition.remove(&stepped_id);
info!(
"{:?} has finished waiting for node addition; still waiting: {:?}",
stepped_id, awaiting_addition
);
if awaiting_addition.is_empty() && !rejoined_first_correct {
let node = saved_first_correct
.take()
.expect("first correct node wasn't saved");
let step = restart_node_for_add(&mut net, node, join_plan, &mut rng);
net.process_step(first_correct_node, &step)
.expect("processing a step failed");
rejoined_first_correct = true;
}
}
}
if rejoined_first_correct && awaiting_second_half.contains(&stepped_id) {
// Input the second half of user transactions into the stepped node.
input_second_half(&mut net, stepped_id, num_txs, &mut rng);
awaiting_second_half.remove(&stepped_id);
}
}
let node_1 = net
.correct_nodes()
.nth(1)
.expect("second correct node is missing");
net.verify_batches(node_1);
}
/// Restarts specified node on the test network for adding it back as a validator.
fn restart_node_for_add<R, A>(
net: &mut VirtualNet<SQ, A>,
mut node: Node<SQ>,
join_plan: JoinPlan<NodeId>,
mut rng: &mut R,
) -> Step<QueueingHoneyBadger<usize, NodeId, Vec<usize>>>
where
R: rand::Rng,
A: Adversary<SQ>,
{
let our_id = *node.id();
println!("Restarting node {} with {:?}", node.id(), join_plan);
// TODO: When an observer node is added to the network, it should also be added to peer_ids.
let peer_ids: Vec<_> = net
.nodes()
.map(Node::id)
.filter(|id| *id != node.id())
.cloned()
.collect();
let secret_key = node.algorithm().algo().dyn_hb().secret_key().clone();
let (qhb, qhb_step) =
QueueingHoneyBadger::builder_joining(our_id, secret_key, join_plan, &mut rng)
.and_then(|builder| builder.batch_size(3).build(&mut rng))
.expect("failed to rebuild the node with a join plan");
let (sq, mut sq_step) = SenderQueue::builder(qhb, peer_ids.into_iter()).build(our_id);
*node.algorithm_mut() = sq;
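// Wrap the QHB step's messages as sender-queue messages and merge them into the sender-queue step.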
sq_step.extend(qhb_step.map(|output| output, |fault| fault, Message::from));
net.insert_node(node);
sq_step
}
// Allow passing `netinfo` by value. `TestNetwork` expects this function signature.
#[allow(clippy::needless_pass_by_value)]
fn | (node_info: NewNodeInfo<SQ>, seed: TestRngSeed) -> (SQ, Step<QHB>) {
let mut rng: TestRng = TestRng::from_seed(seed);
let peer_ids = node_info.netinfo.other_ids().cloned();
let netinfo = node_info.netinfo.clone();
let dhb =
DynamicHoneyBadger::builder().build(netinfo, node_info.secret_key, node_info.pub_keys);
let (qhb, qhb_step) = QueueingHoneyBadger::builder(dhb)
.batch_size(3)
.build(&mut rng)
.expect("failed to build QueueingHoneyBadger");
let our_id = *node_info.netinfo.our_id();
let (sq, mut step) = SenderQueue::builder(qhb, peer_ids).build(our_id);
let output = step.extend_with(qhb_step, |fault| fault, Message::from);
assert!(output.is_empty());
(sq, step)
}
fn test_queueing_honey_badger_different_sizes<A, F>(
new_adversary: F,
num_txs: usize,
seed | new_queueing_hb | identifier_name |
queueing_honey_badger.rs | <SQ, A>,
id: NodeId,
num_txs: usize,
mut rng: &mut TestRng,
) where
A: Adversary<SQ>,
{
for tx in (num_txs / 2)..num_txs {
let _ = net.send_input(id, Input::User(tx), &mut rng);
}
}
/// Proposes `num_txs` values and expects nodes to output and order them.
fn test_queueing_honey_badger<A>(mut net: VirtualNet<SQ, A>, num_txs: usize, mut rng: &mut TestRng)
where
A: Adversary<SQ>,
{
// Make two copies of all public keys.
let pub_keys_add = net
.correct_nodes()
.nth(0)
.expect("At least one correct node needs to exist")
.algorithm()
.algo()
.dyn_hb()
.public_keys()
.clone();
let mut pub_keys_rm = pub_keys_add.clone();
// Get the first correct node id as candidate for removal/re-adding.
let first_correct_node = *net.correct_nodes().nth(0).unwrap().id();
// Remove the first correct node from the new key set; it is the candidate to be removed from the network.
Arc::make_mut(&mut pub_keys_rm).remove(&first_correct_node);
// Broadcast public keys of all nodes except for the node to be removed.
let _ = net.broadcast_input(
&Input::Change(Change::NodeChange(pub_keys_rm.clone())),
&mut rng,
);
// Broadcast the first half of the transactions.
for tx in 0..(num_txs / 2) {
let _ = net.broadcast_input(&Input::User(tx), &mut rng);
}
// Closure for checking the output of a node for ChangeSet completion containing
// all nodes but the removed node.
let has_remove = |node: &Node<SQ>| {
node.outputs().iter().any(|batch| match batch.change() {
ChangeState::Complete(Change::NodeChange(pub_keys)) => pub_keys == &pub_keys_rm,
_ => false,
})
};
// Closure for checking the output of a node for ChangeSet completion containing
// all nodes, including the previously removed node.
let has_add = |node: &Node<SQ>| {
node.outputs().iter().any(|batch| match batch.change() {
ChangeState::Complete(Change::NodeChange(pub_keys)) => pub_keys == &pub_keys_add,
_ => false,
})
};
// Returns `true` if the node has not output all changes or transactions yet.
let node_busy = |node: &Node<SQ>| {
!has_remove(node) || !has_add(node) || !node.algorithm().algo().queue().is_empty()
};
// All nodes await removal.
let mut awaiting_removal: BTreeSet<_> = net.correct_nodes().map(|node| *node.id()).collect();
// All nodes but the removed node await addition.
let mut awaiting_addition: BTreeSet<_> = net
.correct_nodes()
.map(|node| *node.id())
.filter(|id| *id != first_correct_node)
.collect();
// All, including the previously removed node, await the second half of transactions.
let mut awaiting_second_half: BTreeSet<_> = awaiting_removal.clone();
// Whether the first correct node was rejoined as a validator.
let mut rejoined_first_correct = false;
// The removed first correct node which is to be restarted as soon as all remaining
// validators agreed to add it back.
let mut saved_first_correct: Option<Node<SQ>> = None;
// Handle messages in random order until all nodes have output all transactions.
while net.correct_nodes().any(node_busy) {
let stepped_id = net.crank_expect(&mut rng).0;
if awaiting_removal.contains(&stepped_id) && has_remove(&net.get(stepped_id).unwrap()) {
awaiting_removal.remove(&stepped_id); | info!(
"{:?} has finished waiting for node removal; still waiting: {:?}",
stepped_id, awaiting_removal
);
if awaiting_removal.is_empty() {
info!("Removing first correct node from the test network");
saved_first_correct = net.remove_node(&first_correct_node);
}
// Vote to add the first correct node back.
if stepped_id != first_correct_node {
let _ = net.send_input(
stepped_id,
Input::Change(Change::NodeChange(pub_keys_add.clone())),
rng,
);
info!(
"Input the vote to add the first correct node into {:?} with netinfo {:?}",
stepped_id,
net.get(stepped_id).unwrap().algorithm().algo().netinfo()
);
}
}
if awaiting_removal.is_empty() && awaiting_addition.contains(&stepped_id) {
// If the stepped node started voting to add the first correct node back,
// take a note of that and rejoin it.
if let Some(join_plan) =
net.get(stepped_id)
.unwrap()
.outputs()
.iter()
.find_map(|batch| match batch.change() {
ChangeState::InProgress(Change::NodeChange(pub_keys))
if pub_keys == &pub_keys_add =>
{
batch.join_plan()
}
_ => None,
})
{
awaiting_addition.remove(&stepped_id);
info!(
"{:?} has finished waiting for node addition; still waiting: {:?}",
stepped_id, awaiting_addition
);
if awaiting_addition.is_empty() && !rejoined_first_correct {
let node = saved_first_correct
.take()
.expect("first correct node wasn't saved");
let step = restart_node_for_add(&mut net, node, join_plan, &mut rng);
net.process_step(first_correct_node, &step)
.expect("processing a step failed");
rejoined_first_correct = true;
}
}
}
if rejoined_first_correct && awaiting_second_half.contains(&stepped_id) {
// Input the second half of user transactions into the stepped node.
input_second_half(&mut net, stepped_id, num_txs, &mut rng);
awaiting_second_half.remove(&stepped_id);
}
}
let node_1 = net
.correct_nodes()
.nth(1)
.expect("second correct node is missing");
net.verify_batches(node_1);
}
/// Restarts specified node on the test network for adding it back as a validator.
fn restart_node_for_add<R, A>(
net: &mut VirtualNet<SQ, A>,
mut node: Node<SQ>,
join_plan: JoinPlan<NodeId>,
mut rng: &mut R,
) -> Step<QueueingHoneyBadger<usize, NodeId, Vec<usize>>>
where
R: rand::Rng,
A: Adversary<SQ>,
{
let our_id = *node.id();
println!("Restarting node {} with {:?}", node.id(), join_plan);
// TODO: When an observer node is added to the network, it should also be added to peer_ids.
let peer_ids: Vec<_> = net
.nodes()
.map(Node::id)
.filter(|id| *id != node.id())
.cloned()
.collect();
let secret_key = node.algorithm().algo().dyn_hb().secret_key().clone();
let (qhb, qhb_step) =
QueueingHoneyBadger::builder_joining(our_id, secret_key, join_plan, &mut rng)
.and_then(|builder| builder.batch_size(3).build(&mut rng))
.expect("failed to rebuild the node with a join plan");
let (sq, mut sq_step) = SenderQueue::builder(qhb, peer_ids.into_iter()).build(our_id);
*node.algorithm_mut() = sq;
sq_step.extend(qhb_step.map(|output| output, |fault| fault, Message::from));
net.insert_node(node);
sq_step
}
// Allow passing `netinfo` by value. `TestNetwork` expects this function signature.
#[allow(clippy::needless_pass_by_value)]
fn new_queueing_hb(node_info: NewNodeInfo<SQ>, seed: TestRngSeed) -> (SQ, Step<QHB>) {
let mut rng: TestRng = TestRng::from_seed(seed);
let peer_ids = node_info.netinfo.other_ids().cloned();
let netinfo = node_info.netinfo.clone();
let dhb =
DynamicHoneyBadger::builder().build(netinfo, node_info.secret_key, node_info.pub_keys);
let (qhb, qhb_step) = QueueingHoneyBadger::builder(dhb)
.batch_size(3)
.build(&mut rng)
.expect("failed to build QueueingHoneyBadger");
let our_id = *node_info.netinfo.our_id();
let (sq, mut step) = SenderQueue::builder(qhb, peer_ids).build(our_id);
let output = step.extend_with(qhb_step, |fault| fault, Message::from);
assert!(output.is_empty());
(sq, step)
}
fn test_queueing_honey_badger_different_sizes<A, F>(
new_adversary: F,
num_txs: usize,
seed: | random_line_split |
|
queueing_honey_badger.rs | if the node has not output all changes or transactions yet.
let node_busy = |node: &Node<SQ>| {
!has_remove(node) || !has_add(node) || !node.algorithm().algo().queue().is_empty()
};
// All nodes await removal.
let mut awaiting_removal: BTreeSet<_> = net.correct_nodes().map(|node| *node.id()).collect();
// All nodes but the removed node await addition.
let mut awaiting_addition: BTreeSet<_> = net
.correct_nodes()
.map(|node| *node.id())
.filter(|id| *id != first_correct_node)
.collect();
// All, including the previously removed node, await the second half of transactions.
let mut awaiting_second_half: BTreeSet<_> = awaiting_removal.clone();
// Whether the first correct node was rejoined as a validator.
let mut rejoined_first_correct = false;
// The removed first correct node which is to be restarted as soon as all remaining
// validators agreed to add it back.
let mut saved_first_correct: Option<Node<SQ>> = None;
// Handle messages in random order until all nodes have output all transactions.
while net.correct_nodes().any(node_busy) {
let stepped_id = net.crank_expect(&mut rng).0;
if awaiting_removal.contains(&stepped_id) && has_remove(&net.get(stepped_id).unwrap()) {
awaiting_removal.remove(&stepped_id);
info!(
"{:?} has finished waiting for node removal; still waiting: {:?}",
stepped_id, awaiting_removal
);
if awaiting_removal.is_empty() {
info!("Removing first correct node from the test network");
saved_first_correct = net.remove_node(&first_correct_node);
}
// Vote to add the first correct node back.
if stepped_id != first_correct_node {
let _ = net.send_input(
stepped_id,
Input::Change(Change::NodeChange(pub_keys_add.clone())),
rng,
);
info!(
"Input the vote to add the first correct node into {:?} with netinfo {:?}",
stepped_id,
net.get(stepped_id).unwrap().algorithm().algo().netinfo()
);
}
}
if awaiting_removal.is_empty() && awaiting_addition.contains(&stepped_id) {
// If the stepped node started voting to add the first correct node back,
// take a note of that and rejoin it.
if let Some(join_plan) =
net.get(stepped_id)
.unwrap()
.outputs()
.iter()
.find_map(|batch| match batch.change() {
ChangeState::InProgress(Change::NodeChange(pub_keys))
if pub_keys == &pub_keys_add =>
{
batch.join_plan()
}
_ => None,
})
{
awaiting_addition.remove(&stepped_id);
info!(
"{:?} has finished waiting for node addition; still waiting: {:?}",
stepped_id, awaiting_addition
);
if awaiting_addition.is_empty() && !rejoined_first_correct {
let node = saved_first_correct
.take()
.expect("first correct node wasn't saved");
let step = restart_node_for_add(&mut net, node, join_plan, &mut rng);
net.process_step(first_correct_node, &step)
.expect("processing a step failed");
rejoined_first_correct = true;
}
}
}
if rejoined_first_correct && awaiting_second_half.contains(&stepped_id) {
// Input the second half of user transactions into the stepped node.
input_second_half(&mut net, stepped_id, num_txs, &mut rng);
awaiting_second_half.remove(&stepped_id);
}
}
let node_1 = net
.correct_nodes()
.nth(1)
.expect("second correct node is missing");
net.verify_batches(node_1);
}
/// Restarts specified node on the test network for adding it back as a validator.
fn restart_node_for_add<R, A>(
net: &mut VirtualNet<SQ, A>,
mut node: Node<SQ>,
join_plan: JoinPlan<NodeId>,
mut rng: &mut R,
) -> Step<QueueingHoneyBadger<usize, NodeId, Vec<usize>>>
where
R: rand::Rng,
A: Adversary<SQ>,
{
let our_id = *node.id();
println!("Restarting node {} with {:?}", node.id(), join_plan);
// TODO: When an observer node is added to the network, it should also be added to peer_ids.
let peer_ids: Vec<_> = net
.nodes()
.map(Node::id)
.filter(|id| *id != node.id())
.cloned()
.collect();
let secret_key = node.algorithm().algo().dyn_hb().secret_key().clone();
let (qhb, qhb_step) =
QueueingHoneyBadger::builder_joining(our_id, secret_key, join_plan, &mut rng)
.and_then(|builder| builder.batch_size(3).build(&mut rng))
.expect("failed to rebuild the node with a join plan");
let (sq, mut sq_step) = SenderQueue::builder(qhb, peer_ids.into_iter()).build(our_id);
*node.algorithm_mut() = sq;
sq_step.extend(qhb_step.map(|output| output, |fault| fault, Message::from));
net.insert_node(node);
sq_step
}
// Allow passing `netinfo` by value. `TestNetwork` expects this function signature.
#[allow(clippy::needless_pass_by_value)]
fn new_queueing_hb(node_info: NewNodeInfo<SQ>, seed: TestRngSeed) -> (SQ, Step<QHB>) {
let mut rng: TestRng = TestRng::from_seed(seed);
let peer_ids = node_info.netinfo.other_ids().cloned();
let netinfo = node_info.netinfo.clone();
let dhb =
DynamicHoneyBadger::builder().build(netinfo, node_info.secret_key, node_info.pub_keys);
let (qhb, qhb_step) = QueueingHoneyBadger::builder(dhb)
.batch_size(3)
.build(&mut rng)
.expect("failed to build QueueingHoneyBadger");
let our_id = *node_info.netinfo.our_id();
let (sq, mut step) = SenderQueue::builder(qhb, peer_ids).build(our_id);
let output = step.extend_with(qhb_step, |fault| fault, Message::from);
assert!(output.is_empty());
(sq, step)
}
fn test_queueing_honey_badger_different_sizes<A, F>(
new_adversary: F,
num_txs: usize,
seed: TestRngSeed,
) where
A: Adversary<SQ>,
F: Fn() -> A,
{
// This returns an error in all but the first test.
let _ = env_logger::try_init();
let mut rng: TestRng = TestRng::from_seed(seed);
let sizes = vec![3, 5, rng.gen_range(6, 10)];
for size in sizes {
// The test is removing one correct node, so we allow fewer faulty ones.
let num_adv_nodes = util::max_faulty(size - 1);
let num_good_nodes = size - num_adv_nodes;
info!(
"Network size: {} good nodes, {} faulty nodes",
num_good_nodes, num_adv_nodes
);
let (net, _) = NetBuilder::new(0..size as u16)
.num_faulty(num_adv_nodes)
.message_limit(20_000 * size)
.no_time_limit()
.adversary(new_adversary())
.using_step(move |node_info: NewNodeInfo<_>| {
// Note: The "seed" variable is implicitly copied by the move closure.
// The "Copy" trait is *not* implemented for TestRng, which additionally
// needs to be mutable, while we are in a function which captures immutably.
// To avoid convoluted clone/borrow constructs we pass a TestRngSeed
// rather than a TestRng instance.
new_queueing_hb(node_info, seed)
})
.build(&mut rng)
.expect("Could not construct test network.");
test_queueing_honey_badger(net, num_txs, &mut rng);
}
}
proptest! {
#![proptest_config(ProptestConfig {
cases: 1, .. ProptestConfig::default()
})]
#[test]
#[allow(clippy::unnecessary_operation)]
fn test_queueing_honey_badger_random_delivery_silent(seed in gen_seed()) {
do_test_queueing_honey_badger_random_delivery_silent(seed)
}
#[test]
#[allow(clippy::unnecessary_operation)]
fn test_queueing_honey_badger_first_delivery_silent(seed in gen_seed()) {
do_test_queueing_honey_badger_first_delivery_silent(seed)
}
}
fn do_test_queueing_honey_badger_random_delivery_silent(seed: TestRngSeed) | {
test_queueing_honey_badger_different_sizes(ReorderingAdversary::new, 30, seed);
} | identifier_body |
|
queueing_honey_badger.rs | Net<SQ, A>,
id: NodeId,
num_txs: usize,
mut rng: &mut TestRng,
) where
A: Adversary<SQ>,
{
for tx in (num_txs / 2)..num_txs {
let _ = net.send_input(id, Input::User(tx), &mut rng);
}
}
/// Proposes `num_txs` values and expects nodes to output and order them.
fn test_queueing_honey_badger<A>(mut net: VirtualNet<SQ, A>, num_txs: usize, mut rng: &mut TestRng)
where
A: Adversary<SQ>,
{
// Make two copies of all public keys.
let pub_keys_add = net
.correct_nodes()
.nth(0)
.expect("At least one correct node needs to exist")
.algorithm()
.algo()
.dyn_hb()
.public_keys()
.clone();
let mut pub_keys_rm = pub_keys_add.clone();
// Get the first correct node id as candidate for removal/re-adding.
let first_correct_node = *net.correct_nodes().nth(0).unwrap().id();
// Remove the first correct node from the new key set; it is the candidate to be removed from the network.
Arc::make_mut(&mut pub_keys_rm).remove(&first_correct_node);
// Broadcast public keys of all nodes except for the node to be removed.
let _ = net.broadcast_input(
&Input::Change(Change::NodeChange(pub_keys_rm.clone())),
&mut rng,
);
// Broadcast the first half of the transactions.
for tx in 0..(num_txs / 2) {
let _ = net.broadcast_input(&Input::User(tx), &mut rng);
}
// Closure for checking the output of a node for ChangeSet completion containing
// all nodes but the removed node.
let has_remove = |node: &Node<SQ>| {
node.outputs().iter().any(|batch| match batch.change() {
ChangeState::Complete(Change::NodeChange(pub_keys)) => pub_keys == &pub_keys_rm,
_ => false,
})
};
// Closure for checking the output of a node for ChangeSet completion containing
// all nodes, including the previously removed node.
let has_add = |node: &Node<SQ>| {
node.outputs().iter().any(|batch| match batch.change() {
ChangeState::Complete(Change::NodeChange(pub_keys)) => pub_keys == &pub_keys_add,
_ => false,
})
};
// Returns `true` if the node has not output all changes or transactions yet.
let node_busy = |node: &Node<SQ>| {
!has_remove(node) || !has_add(node) || !node.algorithm().algo().queue().is_empty()
};
// All nodes await removal.
let mut awaiting_removal: BTreeSet<_> = net.correct_nodes().map(|node| *node.id()).collect();
// All nodes but the removed node await addition.
let mut awaiting_addition: BTreeSet<_> = net
.correct_nodes()
.map(|node| *node.id())
.filter(|id| *id != first_correct_node)
.collect();
// All, including the previously removed node, await the second half of transactions.
let mut awaiting_second_half: BTreeSet<_> = awaiting_removal.clone();
// Whether the first correct node was rejoined as a validator.
let mut rejoined_first_correct = false;
// The removed first correct node which is to be restarted as soon as all remaining
// validators agreed to add it back.
let mut saved_first_correct: Option<Node<SQ>> = None;
// Handle messages in random order until all nodes have output all transactions.
while net.correct_nodes().any(node_busy) {
let stepped_id = net.crank_expect(&mut rng).0;
if awaiting_removal.contains(&stepped_id) && has_remove(&net.get(stepped_id).unwrap()) {
awaiting_removal.remove(&stepped_id);
info!(
"{:?} has finished waiting for node removal; still waiting: {:?}",
stepped_id, awaiting_removal
);
if awaiting_removal.is_empty() {
info!("Removing first correct node from the test network");
saved_first_correct = net.remove_node(&first_correct_node);
}
// Vote to add the first correct node back.
if stepped_id != first_correct_node {
let _ = net.send_input(
stepped_id,
Input::Change(Change::NodeChange(pub_keys_add.clone())),
rng,
);
info!(
"Input the vote to add the first correct node into {:?} with netinfo {:?}",
stepped_id,
net.get(stepped_id).unwrap().algorithm().algo().netinfo()
);
}
}
if awaiting_removal.is_empty() && awaiting_addition.contains(&stepped_id) | stepped_id, awaiting_addition
);
if awaiting_addition.is_empty() && !rejoined_first_correct {
let node = saved_first_correct
.take()
.expect("first correct node wasn't saved");
let step = restart_node_for_add(&mut net, node, join_plan, &mut rng);
net.process_step(first_correct_node, &step)
.expect("processing a step failed");
rejoined_first_correct = true;
}
}
}
if rejoined_first_correct && awaiting_second_half.contains(&stepped_id) {
// Input the second half of user transactions into the stepped node.
input_second_half(&mut net, stepped_id, num_txs, &mut rng);
awaiting_second_half.remove(&stepped_id);
}
}
let node_1 = net
.correct_nodes()
.nth(1)
.expect("second correct node is missing");
net.verify_batches(node_1);
}
/// Restarts specified node on the test network for adding it back as a validator.
fn restart_node_for_add<R, A>(
net: &mut VirtualNet<SQ, A>,
mut node: Node<SQ>,
join_plan: JoinPlan<NodeId>,
mut rng: &mut R,
) -> Step<QueueingHoneyBadger<usize, NodeId, Vec<usize>>>
where
R: rand::Rng,
A: Adversary<SQ>,
{
let our_id = *node.id();
println!("Restarting node {} with {:?}", node.id(), join_plan);
// TODO: When an observer node is added to the network, it should also be added to peer_ids.
let peer_ids: Vec<_> = net
.nodes()
.map(Node::id)
.filter(|id| *id != node.id())
.cloned()
.collect();
let secret_key = node.algorithm().algo().dyn_hb().secret_key().clone();
let (qhb, qhb_step) =
QueueingHoneyBadger::builder_joining(our_id, secret_key, join_plan, &mut rng)
.and_then(|builder| builder.batch_size(3).build(&mut rng))
.expect("failed to rebuild the node with a join plan");
let (sq, mut sq_step) = SenderQueue::builder(qhb, peer_ids.into_iter()).build(our_id);
*node.algorithm_mut() = sq;
sq_step.extend(qhb_step.map(|output| output, |fault| fault, Message::from));
net.insert_node(node);
sq_step
}
// Allow passing `netinfo` by value. `TestNetwork` expects this function signature.
#[allow(clippy::needless_pass_by_value)]
fn new_queueing_hb(node_info: NewNodeInfo<SQ>, seed: TestRngSeed) -> (SQ, Step<QHB>) {
let mut rng: TestRng = TestRng::from_seed(seed);
let peer_ids = node_info.netinfo.other_ids().cloned();
let netinfo = node_info.netinfo.clone();
let dhb =
DynamicHoneyBadger::builder().build(netinfo, node_info.secret_key, node_info.pub_keys);
let (qhb, qhb_step) = QueueingHoneyBadger::builder(dhb)
.batch_size(3)
.build(&mut rng)
.expect("failed to build QueueingHoneyBadger");
let our_id = *node_info.netinfo.our_id();
let (sq, mut step) = SenderQueue::builder(qhb, peer_ids).build(our_id);
let output = step.extend_with(qhb_step, |fault| fault, Message::from);
assert!(output.is_empty());
(sq, step)
}
fn test_queueing_honey_badger_different_sizes<A, F>(
new_adversary: F,
num_txs: usize,
seed: | {
// If the stepped node started voting to add the first correct node back,
// take a note of that and rejoin it.
if let Some(join_plan) =
net.get(stepped_id)
.unwrap()
.outputs()
.iter()
.find_map(|batch| match batch.change() {
ChangeState::InProgress(Change::NodeChange(pub_keys))
if pub_keys == &pub_keys_add =>
{
batch.join_plan()
}
_ => None,
})
{
awaiting_addition.remove(&stepped_id);
info!(
"{:?} has finished waiting for node addition; still waiting: {:?}", | conditional_block |
lru_cache.rs | //! assert_eq!(*cache.get(&2).unwrap(), 22);
//!
//! cache.put(6, 60);
//! assert!(cache.get(&3).is_none());
//!
//! cache.change_capacity(1);
//! assert!(cache.get(&2).is_none());
//! ```
use std::cast;
use std::container::Container;
use std::hash::Hash;
use std::fmt;
use std::ptr;
use HashMap;
struct KeyRef<K> { k: *K }
struct LruEntry<K, V> {
key: Option<K>,
value: Option<V>,
next: *mut LruEntry<K, V>,
prev: *mut LruEntry<K, V>,
}
/// An LRU Cache.
pub struct LruCache<K, V> {
map: HashMap<KeyRef<K>, ~LruEntry<K, V>>,
max_size: uint,
head: *mut LruEntry<K, V>,
tail: *mut LruEntry<K, V>,
}
impl<S, K: Hash<S>> Hash<S> for KeyRef<K> {
fn hash(&self, state: &mut S) {
unsafe { (*self.k).hash(state) }
}
}
impl<K: Eq> Eq for KeyRef<K> {
fn eq(&self, other: &KeyRef<K>) -> bool {
unsafe{ (*self.k).eq(&*other.k) }
}
}
impl<K: TotalEq> TotalEq for KeyRef<K> {}
impl<K, V> LruEntry<K, V> {
fn new() -> LruEntry<K, V> {
LruEntry {
key: None,
value: None,
next: ptr::mut_null(),
prev: ptr::mut_null(),
}
}
fn with_key_value(k: K, v: V) -> LruEntry<K, V> {
LruEntry {
key: Some(k),
value: Some(v),
next: ptr::mut_null(),
prev: ptr::mut_null(),
}
}
}
impl<K: Hash + TotalEq, V> LruCache<K, V> {
/// Create an LRU Cache that holds at most `capacity` items.
pub fn new(capacity: uint) -> LruCache<K, V> {
let cache = LruCache {
map: HashMap::new(),
max_size: capacity,
head: unsafe{ cast::transmute(~LruEntry::<K, V>::new()) },
tail: unsafe{ cast::transmute(~LruEntry::<K, V>::new()) },
};
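// Link the head and tail sentinel nodes together so the list starts out empty.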
unsafe {
(*cache.head).next = cache.tail;
(*cache.tail).prev = cache.head;
}
return cache;
}
/// Put a key-value pair into cache.
pub fn put(&mut self, k: K, v: V) {
let mut key_existed = false;
let (node_ptr, node_opt) = match self.map.find_mut(&KeyRef{k: &k}) {
Some(node) => {
key_existed = true;
node.value = Some(v);
let node_ptr: *mut LruEntry<K, V> = &mut **node;
(node_ptr, None)
}
None => {
let mut node = ~LruEntry::with_key_value(k, v);
let node_ptr: *mut LruEntry<K, V> = &mut *node;
(node_ptr, Some(node))
}
};
if key_existed {
self.detach(node_ptr);
self.attach(node_ptr);
} else {
let keyref = unsafe { (*node_ptr).key.as_ref().unwrap() };
self.map.swap(KeyRef{k: keyref}, node_opt.unwrap());
self.attach(node_ptr);
if self.len() > self.capacity() {
self.remove_lru();
}
}
}
/// Return a value corresponding to the key in the cache.
pub fn get<'a>(&'a mut self, k: &K) -> Option<&'a V> {
let (value, node_ptr_opt) = match self.map.find_mut(&KeyRef{k: k}) {
None => (None, None),
Some(node) => {
let node_ptr: *mut LruEntry<K, V> = &mut **node;
unsafe {
match (*node_ptr).value {
None => (None, None),
Some(ref value) => (Some(value), Some(node_ptr))
}
}
}
};
match node_ptr_opt {
None => (),
Some(node_ptr) => {
self.detach(node_ptr);
self.attach(node_ptr);
}
}
return value;
}
/// Remove and return a value corresponding to the key from the cache.
pub fn pop(&mut self, k: &K) -> Option<V> {
match self.map.pop(&KeyRef{k: k}) {
None => None,
Some(lru_entry) => lru_entry.value
}
}
/// Return the maximum number of key-value pairs the cache can hold.
pub fn capacity(&self) -> uint {
self.max_size
}
/// Change the number of key-value pairs the cache can hold. Remove
/// least-recently-used key-value pairs if necessary.
pub fn change_capacity(&mut self, capacity: uint) {
for _ in range(capacity, self.len()) {
self.remove_lru();
}
self.max_size = capacity;
}
#[inline]
fn remove_lru(&mut self) {
if self.len() > 0 {
let lru = unsafe { (*self.tail).prev };
self.detach(lru);
unsafe {
match (*lru).key {
None => (),
Some(ref k) => { self.map.pop(&KeyRef{k: k}); }
}
}
}
}
#[inline]
fn detach(&mut self, node: *mut LruEntry<K, V>) {
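// Unlink `node` from the list by pointing its neighbours at each other.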
unsafe {
(*(*node).prev).next = (*node).next;
(*(*node).next).prev = (*node).prev;
}
}
#[inline]
fn attach(&mut self, node: *mut LruEntry<K, V>) {
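// Insert `node` right after the head sentinel, i.e. into the most-recently-used slot.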
unsafe {
(*node).next = (*self.head).next;
(*node).prev = self.head;
(*self.head).next = node;
(*(*node).next).prev = node;
}
}
}
impl<A: fmt::Show + Hash + TotalEq, B: fmt::Show> fmt::Show for LruCache<A, B> {
/// Return a string that lists the key-value pairs from most-recently
/// used to least-recently used.
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
try!(write!(f.buf, r"\{"));
let mut cur = self.head;
for i in range(0, self.len()) {
if i > 0 { try!(write!(f.buf, ", ")) }
unsafe {
cur = (*cur).next;
match (*cur).key {
// should never print nil
None => try!(write!(f.buf, "nil")),
Some(ref k) => try!(write!(f.buf, "{}", *k)),
}
}
try!(write!(f.buf, ": "));
unsafe {
match (*cur).value {
// should never print nil
None => try!(write!(f.buf, "nil")),
Some(ref value) => try!(write!(f.buf, "{}", *value)),
}
}
}
write!(f.buf, r"\}")
}
}
impl<K: Hash + TotalEq, V> Container for LruCache<K, V> {
/// Return the number of key-value pairs in the cache.
fn len(&self) -> uint {
self.map.len()
}
}
impl<K: Hash + TotalEq, V> Mutable for LruCache<K, V> {
/// Clear the cache of all key-value pairs.
fn clear(&mut self) {
self.map.clear();
}
}
#[unsafe_destructor]
impl<K, V> Drop for LruCache<K, V> {
fn | (&mut self) {
unsafe {
let _: ~LruEntry<K, V> = cast::transmute(self.head);
let _: ~LruEntry<K, V> = cast::transmute(self.tail);
}
}
}
#[cfg(test)]
mod tests {
use super::LruCache;
fn assert_opt_eq<V: Eq>(opt: Option<&V>, v: V) {
assert!(opt.is_some());
assert!(opt.unwrap() == &v);
}
#[test]
fn test_put_and_get() {
let mut cache: LruCache<int, int> = LruCache::new(2);
cache.put(1, 10);
cache.put(2, 20);
assert_opt_eq(cache.get(&1), 10);
assert_opt_eq(cache.get(&2), 20);
assert_eq!(cache.len(), 2);
}
#[test]
fn test_put_update() {
let mut cache: LruCache<~str, Vec<u8>> = LruCache::new(1);
cache.put(~"1", vec![10, 10]);
cache.put(~"1", vec![10, 19]);
assert_opt_eq(cache.get(&~"1"), vec![1 | drop | identifier_name |
lru_cache.rs | //! assert_eq!(*cache.get(&2).unwrap(), 22);
//!
//! cache.put(6, 60);
//! assert!(cache.get(&3).is_none());
//!
//! cache.change_capacity(1);
//! assert!(cache.get(&2).is_none());
//! ```
use std::cast;
use std::container::Container;
use std::hash::Hash;
use std::fmt;
use std::ptr;
use HashMap;
struct KeyRef<K> { k: *K }
struct LruEntry<K, V> {
key: Option<K>,
value: Option<V>,
next: *mut LruEntry<K, V>,
prev: *mut LruEntry<K, V>,
}
/// An LRU Cache.
pub struct LruCache<K, V> {
map: HashMap<KeyRef<K>, ~LruEntry<K, V>>,
max_size: uint,
head: *mut LruEntry<K, V>,
tail: *mut LruEntry<K, V>,
}
impl<S, K: Hash<S>> Hash<S> for KeyRef<K> {
fn hash(&self, state: &mut S) {
unsafe { (*self.k).hash(state) }
}
}
impl<K: Eq> Eq for KeyRef<K> {
fn eq(&self, other: &KeyRef<K>) -> bool {
unsafe{ (*self.k).eq(&*other.k) }
}
}
impl<K: TotalEq> TotalEq for KeyRef<K> {}
impl<K, V> LruEntry<K, V> {
fn new() -> LruEntry<K, V> {
LruEntry {
key: None,
value: None,
next: ptr::mut_null(),
prev: ptr::mut_null(),
}
}
fn with_key_value(k: K, v: V) -> LruEntry<K, V> {
LruEntry {
key: Some(k),
value: Some(v),
next: ptr::mut_null(),
prev: ptr::mut_null(),
}
}
}
impl<K: Hash + TotalEq, V> LruCache<K, V> {
/// Create an LRU Cache that holds at most `capacity` items.
pub fn new(capacity: uint) -> LruCache<K, V> {
let cache = LruCache {
map: HashMap::new(),
max_size: capacity,
head: unsafe{ cast::transmute(~LruEntry::<K, V>::new()) },
tail: unsafe{ cast::transmute(~LruEntry::<K, V>::new()) },
};
unsafe {
(*cache.head).next = cache.tail;
(*cache.tail).prev = cache.head;
}
return cache;
}
/// Put a key-value pair into cache.
pub fn put(&mut self, k: K, v: V) {
let mut key_existed = false;
let (node_ptr, node_opt) = match self.map.find_mut(&KeyRef{k: &k}) {
Some(node) => {
key_existed = true;
node.value = Some(v);
let node_ptr: *mut LruEntry<K, V> = &mut **node;
(node_ptr, None)
}
None => {
let mut node = ~LruEntry::with_key_value(k, v);
let node_ptr: *mut LruEntry<K, V> = &mut *node;
(node_ptr, Some(node))
}
};
if key_existed {
self.detach(node_ptr);
self.attach(node_ptr);
} else {
let keyref = unsafe { (*node_ptr).key.as_ref().unwrap() };
self.map.swap(KeyRef{k: keyref}, node_opt.unwrap());
self.attach(node_ptr);
if self.len() > self.capacity() {
self.remove_lru();
}
}
}
/// Return a value corresponding to the key in the cache.
pub fn get<'a>(&'a mut self, k: &K) -> Option<&'a V> {
let (value, node_ptr_opt) = match self.map.find_mut(&KeyRef{k: k}) {
None => (None, None),
Some(node) => {
let node_ptr: *mut LruEntry<K, V> = &mut **node;
unsafe {
match (*node_ptr).value {
None => (None, None),
Some(ref value) => (Some(value), Some(node_ptr))
}
}
}
};
match node_ptr_opt {
None => (),
Some(node_ptr) => {
self.detach(node_ptr);
self.attach(node_ptr);
}
}
return value;
}
/// Remove and return a value corresponding to the key from the cache.
pub fn pop(&mut self, k: &K) -> Option<V> {
match self.map.pop(&KeyRef{k: k}) {
None => None,
Some(lru_entry) => lru_entry.value
}
}
/// Return the maximum number of key-value pairs the cache can hold.
pub fn capacity(&self) -> uint {
self.max_size
}
/// Change the number of key-value pairs the cache can hold. Remove
/// least-recently-used key-value pairs if necessary.
pub fn change_capacity(&mut self, capacity: uint) {
for _ in range(capacity, self.len()) {
self.remove_lru();
}
self.max_size = capacity;
}
#[inline]
fn remove_lru(&mut self) {
if self.len() > 0 {
let lru = unsafe { (*self.tail).prev };
self.detach(lru);
unsafe {
match (*lru).key {
None => (),
Some(ref k) => { self.map.pop(&KeyRef{k: k}); }
}
}
}
}
#[inline]
fn detach(&mut self, node: *mut LruEntry<K, V>) {
unsafe {
(*(*node).prev).next = (*node).next;
(*(*node).next).prev = (*node).prev;
}
}
#[inline]
fn attach(&mut self, node: *mut LruEntry<K, V>) {
unsafe {
(*node).next = (*self.head).next;
(*node).prev = self.head;
(*self.head).next = node;
(*(*node).next).prev = node;
}
}
}
impl<A: fmt::Show + Hash + TotalEq, B: fmt::Show> fmt::Show for LruCache<A, B> {
/// Return a string that lists the key-value pairs from most-recently
/// used to least-recently used.
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
try!(write!(f.buf, r"\{"));
let mut cur = self.head;
for i in range(0, self.len()) {
if i > 0 |
unsafe {
cur = (*cur).next;
match (*cur).key {
// should never print nil
None => try!(write!(f.buf, "nil")),
Some(ref k) => try!(write!(f.buf, "{}", *k)),
}
}
try!(write!(f.buf, ": "));
unsafe {
match (*cur).value {
// should never print nil
None => try!(write!(f.buf, "nil")),
Some(ref value) => try!(write!(f.buf, "{}", *value)),
}
}
}
write!(f.buf, r"\}")
}
}
impl<K: Hash + TotalEq, V> Container for LruCache<K, V> {
/// Return the number of key-value pairs in the cache.
fn len(&self) -> uint {
self.map.len()
}
}
impl<K: Hash + TotalEq, V> Mutable for LruCache<K, V> {
/// Clear the cache of all key-value pairs.
fn clear(&mut self) {
self.map.clear();
}
}
#[unsafe_destructor]
impl<K, V> Drop for LruCache<K, V> {
fn drop(&mut self) {
unsafe {
let _: ~LruEntry<K, V> = cast::transmute(self.head);
let _: ~LruEntry<K, V> = cast::transmute(self.tail);
}
}
}
#[cfg(test)]
mod tests {
use super::LruCache;
fn assert_opt_eq<V: Eq>(opt: Option<&V>, v: V) {
assert!(opt.is_some());
assert!(opt.unwrap() == &v);
}
#[test]
fn test_put_and_get() {
let mut cache: LruCache<int, int> = LruCache::new(2);
cache.put(1, 10);
cache.put(2, 20);
assert_opt_eq(cache.get(&1), 10);
assert_opt_eq(cache.get(&2), 20);
assert_eq!(cache.len(), 2);
}
#[test]
fn test_put_update() {
let mut cache: LruCache<~str, Vec<u8>> = LruCache::new(1);
cache.put(~"1", vec![10, 10]);
cache.put(~"1", vec![10, 19]);
assert_opt_eq(cache.get(&~"1"), vec![ | { try!(write!(f.buf, ", ")) } | conditional_block |
lru_cache.rs | //! assert_eq!(*cache.get(&2).unwrap(), 22);
//!
//! cache.put(6, 60);
//! assert!(cache.get(&3).is_none());
//!
//! cache.change_capacity(1);
//! assert!(cache.get(&2).is_none());
//! ```
use std::cast;
use std::container::Container;
use std::hash::Hash;
use std::fmt;
use std::ptr;
use HashMap;
struct KeyRef<K> { k: *K }
struct LruEntry<K, V> {
key: Option<K>,
value: Option<V>,
next: *mut LruEntry<K, V>,
prev: *mut LruEntry<K, V>,
}
/// An LRU Cache.
pub struct LruCache<K, V> {
map: HashMap<KeyRef<K>, ~LruEntry<K, V>>,
max_size: uint,
head: *mut LruEntry<K, V>,
tail: *mut LruEntry<K, V>,
}
impl<S, K: Hash<S>> Hash<S> for KeyRef<K> {
fn hash(&self, state: &mut S) {
unsafe { (*self.k).hash(state) }
}
}
impl<K: Eq> Eq for KeyRef<K> {
fn eq(&self, other: &KeyRef<K>) -> bool {
unsafe{ (*self.k).eq(&*other.k) }
}
}
impl<K: TotalEq> TotalEq for KeyRef<K> {}
impl<K, V> LruEntry<K, V> {
fn new() -> LruEntry<K, V> {
LruEntry {
key: None,
value: None,
next: ptr::mut_null(),
prev: ptr::mut_null(),
}
}
fn with_key_value(k: K, v: V) -> LruEntry<K, V> {
LruEntry {
key: Some(k),
value: Some(v),
next: ptr::mut_null(),
prev: ptr::mut_null(),
}
}
}
impl<K: Hash + TotalEq, V> LruCache<K, V> {
/// Create an LRU Cache that holds at most `capacity` items.
pub fn new(capacity: uint) -> LruCache<K, V> {
let cache = LruCache {
map: HashMap::new(),
max_size: capacity,
head: unsafe{ cast::transmute(~LruEntry::<K, V>::new()) },
tail: unsafe{ cast::transmute(~LruEntry::<K, V>::new()) },
};
unsafe {
(*cache.head).next = cache.tail;
(*cache.tail).prev = cache.head;
}
return cache;
}
/// Put a key-value pair into cache.
pub fn put(&mut self, k: K, v: V) {
let mut key_existed = false;
let (node_ptr, node_opt) = match self.map.find_mut(&KeyRef{k: &k}) {
Some(node) => {
key_existed = true;
node.value = Some(v);
let node_ptr: *mut LruEntry<K, V> = &mut **node;
(node_ptr, None)
}
None => {
let mut node = ~LruEntry::with_key_value(k, v);
let node_ptr: *mut LruEntry<K, V> = &mut *node;
(node_ptr, Some(node))
}
};
if key_existed {
self.detach(node_ptr);
self.attach(node_ptr);
} else {
let keyref = unsafe { (*node_ptr).key.as_ref().unwrap() };
self.map.swap(KeyRef{k: keyref}, node_opt.unwrap());
self.attach(node_ptr);
if self.len() > self.capacity() {
self.remove_lru();
}
}
}
/// Return a value corresponding to the key in the cache.
pub fn get<'a>(&'a mut self, k: &K) -> Option<&'a V> {
let (value, node_ptr_opt) = match self.map.find_mut(&KeyRef{k: k}) {
None => (None, None),
Some(node) => {
let node_ptr: *mut LruEntry<K, V> = &mut **node;
unsafe {
match (*node_ptr).value {
None => (None, None),
Some(ref value) => (Some(value), Some(node_ptr))
}
}
}
};
match node_ptr_opt {
None => (),
Some(node_ptr) => {
self.detach(node_ptr);
self.attach(node_ptr);
}
}
return value;
}
/// Remove and return a value corresponding to the key from the cache.
pub fn pop(&mut self, k: &K) -> Option<V> {
match self.map.pop(&KeyRef{k: k}) {
None => None,
Some(lru_entry) => lru_entry.value
}
}
/// Return the maximum number of key-value pairs the cache can hold.
pub fn capacity(&self) -> uint {
self.max_size
}
/// Change the number of key-value pairs the cache can hold. Remove
/// least-recently-used key-value pairs if necessary.
pub fn change_capacity(&mut self, capacity: uint) {
for _ in range(capacity, self.len()) {
self.remove_lru();
}
self.max_size = capacity;
}
#[inline]
fn remove_lru(&mut self) {
if self.len() > 0 {
let lru = unsafe { (*self.tail).prev };
self.detach(lru);
unsafe {
match (*lru).key {
None => (),
Some(ref k) => { self.map.pop(&KeyRef{k: k}); }
}
}
}
}
#[inline]
fn detach(&mut self, node: *mut LruEntry<K, V>) {
unsafe {
(*(*node).prev).next = (*node).next;
(*(*node).next).prev = (*node).prev;
}
}
#[inline]
fn attach(&mut self, node: *mut LruEntry<K, V>) {
unsafe {
(*node).next = (*self.head).next;
(*node).prev = self.head;
(*self.head).next = node;
(*(*node).next).prev = node;
}
}
}
impl<A: fmt::Show + Hash + TotalEq, B: fmt::Show> fmt::Show for LruCache<A, B> {
/// Return a string that lists the key-value pairs from most-recently
/// used to least-recently used.
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
try!(write!(f.buf, r"\{"));
let mut cur = self.head;
for i in range(0, self.len()) {
if i > 0 { try!(write!(f.buf, ", ")) }
unsafe {
cur = (*cur).next;
match (*cur).key {
// should never print nil
None => try!(write!(f.buf, "nil")),
Some(ref k) => try!(write!(f.buf, "{}", *k)),
}
}
try!(write!(f.buf, ": "));
unsafe {
match (*cur).value {
// should never print nil
None => try!(write!(f.buf, "nil")),
Some(ref value) => try!(write!(f.buf, "{}", *value)),
}
}
} | write!(f.buf, r"\}")
}
}
impl<K: Hash + TotalEq, V> Container for LruCache<K, V> {
/// Return the number of key-value pairs in the cache.
fn len(&self) -> uint {
self.map.len()
}
}
impl<K: Hash + TotalEq, V> Mutable for LruCache<K, V> {
/// Clear the cache of all key-value pairs.
fn clear(&mut self) {
self.map.clear();
}
}
#[unsafe_destructor]
impl<K, V> Drop for LruCache<K, V> {
fn drop(&mut self) {
unsafe {
let _: ~LruEntry<K, V> = cast::transmute(self.head);
let _: ~LruEntry<K, V> = cast::transmute(self.tail);
}
}
}
#[cfg(test)]
mod tests {
use super::LruCache;
fn assert_opt_eq<V: Eq>(opt: Option<&V>, v: V) {
assert!(opt.is_some());
assert!(opt.unwrap() == &v);
}
#[test]
fn test_put_and_get() {
let mut cache: LruCache<int, int> = LruCache::new(2);
cache.put(1, 10);
cache.put(2, 20);
assert_opt_eq(cache.get(&1), 10);
assert_opt_eq(cache.get(&2), 20);
assert_eq!(cache.len(), 2);
}
#[test]
fn test_put_update() {
let mut cache: LruCache<~str, Vec<u8>> = LruCache::new(1);
cache.put(~"1", vec![10, 10]);
cache.put(~"1", vec![10, 19]);
assert_opt_eq(cache.get(&~"1"), vec![10 | random_line_split |
|
lru_cache.rs | //! assert_eq!(*cache.get(&2).unwrap(), 22);
//!
//! cache.put(6, 60);
//! assert!(cache.get(&3).is_none());
//!
//! cache.change_capacity(1);
//! assert!(cache.get(&2).is_none());
//! ```
use std::cast;
use std::container::Container;
use std::hash::Hash;
use std::fmt;
use std::ptr;
use HashMap;
struct KeyRef<K> { k: *K }
struct LruEntry<K, V> {
key: Option<K>,
value: Option<V>,
next: *mut LruEntry<K, V>,
prev: *mut LruEntry<K, V>,
}
/// An LRU Cache.
pub struct LruCache<K, V> {
map: HashMap<KeyRef<K>, ~LruEntry<K, V>>,
max_size: uint,
head: *mut LruEntry<K, V>,
tail: *mut LruEntry<K, V>,
}
impl<S, K: Hash<S>> Hash<S> for KeyRef<K> {
fn hash(&self, state: &mut S) {
unsafe { (*self.k).hash(state) }
}
}
impl<K: Eq> Eq for KeyRef<K> {
fn eq(&self, other: &KeyRef<K>) -> bool {
unsafe{ (*self.k).eq(&*other.k) }
}
}
impl<K: TotalEq> TotalEq for KeyRef<K> {}
impl<K, V> LruEntry<K, V> {
fn new() -> LruEntry<K, V> {
LruEntry {
key: None,
value: None,
next: ptr::mut_null(),
prev: ptr::mut_null(),
}
}
fn with_key_value(k: K, v: V) -> LruEntry<K, V> {
LruEntry {
key: Some(k),
value: Some(v),
next: ptr::mut_null(),
prev: ptr::mut_null(),
}
}
}
impl<K: Hash + TotalEq, V> LruCache<K, V> {
/// Create an LRU Cache that holds at most `capacity` items.
pub fn new(capacity: uint) -> LruCache<K, V> {
let cache = LruCache {
map: HashMap::new(),
max_size: capacity,
head: unsafe{ cast::transmute(~LruEntry::<K, V>::new()) },
tail: unsafe{ cast::transmute(~LruEntry::<K, V>::new()) },
};
unsafe {
(*cache.head).next = cache.tail;
(*cache.tail).prev = cache.head;
}
return cache;
}
/// Put a key-value pair into cache.
pub fn put(&mut self, k: K, v: V) {
let mut key_existed = false;
let (node_ptr, node_opt) = match self.map.find_mut(&KeyRef{k: &k}) {
Some(node) => {
key_existed = true;
node.value = Some(v);
let node_ptr: *mut LruEntry<K, V> = &mut **node;
(node_ptr, None)
}
None => {
let mut node = ~LruEntry::with_key_value(k, v);
let node_ptr: *mut LruEntry<K, V> = &mut *node;
(node_ptr, Some(node))
}
};
if key_existed {
self.detach(node_ptr);
self.attach(node_ptr);
} else {
let keyref = unsafe { (*node_ptr).key.as_ref().unwrap() };
self.map.swap(KeyRef{k: keyref}, node_opt.unwrap());
self.attach(node_ptr);
if self.len() > self.capacity() {
self.remove_lru();
}
}
}
/// Return a value corresponding to the key in the cache.
pub fn get<'a>(&'a mut self, k: &K) -> Option<&'a V> {
let (value, node_ptr_opt) = match self.map.find_mut(&KeyRef{k: k}) {
None => (None, None),
Some(node) => {
let node_ptr: *mut LruEntry<K, V> = &mut **node;
unsafe {
match (*node_ptr).value {
None => (None, None),
Some(ref value) => (Some(value), Some(node_ptr))
}
}
}
};
match node_ptr_opt {
None => (),
Some(node_ptr) => {
self.detach(node_ptr);
self.attach(node_ptr);
}
}
return value;
}
/// Remove and return a value corresponding to the key from the cache.
pub fn pop(&mut self, k: &K) -> Option<V> {
match self.map.pop(&KeyRef{k: k}) {
None => None,
Some(lru_entry) => lru_entry.value
}
}
/// Return the maximum number of key-value pairs the cache can hold.
pub fn capacity(&self) -> uint {
self.max_size
}
/// Change the number of key-value pairs the cache can hold. Remove
/// least-recently-used key-value pairs if necessary.
pub fn change_capacity(&mut self, capacity: uint) {
for _ in range(capacity, self.len()) {
self.remove_lru();
}
self.max_size = capacity;
}
#[inline]
fn remove_lru(&mut self) |
#[inline]
fn detach(&mut self, node: *mut LruEntry<K, V>) {
unsafe {
(*(*node).prev).next = (*node).next;
(*(*node).next).prev = (*node).prev;
}
}
#[inline]
fn attach(&mut self, node: *mut LruEntry<K, V>) {
unsafe {
(*node).next = (*self.head).next;
(*node).prev = self.head;
(*self.head).next = node;
(*(*node).next).prev = node;
}
}
}
impl<A: fmt::Show + Hash + TotalEq, B: fmt::Show> fmt::Show for LruCache<A, B> {
/// Return a string that lists the key-value pairs from most-recently
/// used to least-recently used.
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
try!(write!(f.buf, r"\{"));
let mut cur = self.head;
for i in range(0, self.len()) {
if i > 0 { try!(write!(f.buf, ", ")) }
unsafe {
cur = (*cur).next;
match (*cur).key {
// should never print nil
None => try!(write!(f.buf, "nil")),
Some(ref k) => try!(write!(f.buf, "{}", *k)),
}
}
try!(write!(f.buf, ": "));
unsafe {
match (*cur).value {
// should never print nil
None => try!(write!(f.buf, "nil")),
Some(ref value) => try!(write!(f.buf, "{}", *value)),
}
}
}
write!(f.buf, r"\}")
}
}
impl<K: Hash + TotalEq, V> Container for LruCache<K, V> {
/// Return the number of key-value pairs in the cache.
fn len(&self) -> uint {
self.map.len()
}
}
impl<K: Hash + TotalEq, V> Mutable for LruCache<K, V> {
/// Clear the cache of all key-value pairs.
fn clear(&mut self) {
self.map.clear();
}
}
#[unsafe_destructor]
impl<K, V> Drop for LruCache<K, V> {
fn drop(&mut self) {
unsafe {
let _: ~LruEntry<K, V> = cast::transmute(self.head);
let _: ~LruEntry<K, V> = cast::transmute(self.tail);
}
}
}
#[cfg(test)]
mod tests {
use super::LruCache;
fn assert_opt_eq<V: Eq>(opt: Option<&V>, v: V) {
assert!(opt.is_some());
assert!(opt.unwrap() == &v);
}
#[test]
fn test_put_and_get() {
let mut cache: LruCache<int, int> = LruCache::new(2);
cache.put(1, 10);
cache.put(2, 20);
assert_opt_eq(cache.get(&1), 10);
assert_opt_eq(cache.get(&2), 20);
assert_eq!(cache.len(), 2);
}
#[test]
fn test_put_update() {
let mut cache: LruCache<~str, Vec<u8>> = LruCache::new(1);
cache.put(~"1", vec![10, 10]);
cache.put(~"1", vec![10, 19]);
assert_opt_eq(cache.get(&~"1"), vec![ | {
if self.len() > 0 {
let lru = unsafe { (*self.tail).prev };
self.detach(lru);
unsafe {
match (*lru).key {
None => (),
Some(ref k) => { self.map.pop(&KeyRef{k: k}); }
}
}
}
} | identifier_body |
chartdata.py | __allow_access_to_unprotected_subobjects__ = 1
'''
EVENT TYPES
'''
# EVENT TYPES
CREATION = 1000
# set as attributes for EventBrain instances.
metadata = ['date_year', 'date_month', 'date_day', 'related_object_id',
'path', 'patient_id', 'id', 'event_type', 'meta_type']
def __init__(self, patient, ev_type, date, related_obj, author=None):
'''
author param is used only in migration of ChartItemEventWrapper's
'''
self.portal = getSite()
self.cct = getToolByName(self.portal, 'cmed_catalog_tool')
self.patient_id = patient.getId()
self.date = date
self.related_obj = related_obj
# the author keyword param is set only when importing ChartDataItemWrapper's
if author:
self.author = author
else:
self.author = related_obj.getOwner().getId()
self.type = ev_type
# indexes and metadata
# self.event_text = self.eprint() # commented out: this is problematic when migrating, since the object isn't in the catalog yet. (ATBlob early creation)
self.date_year = date.year()
self.date_month = date.month()
self.date_day = date.day()
self.path = self.event_url()
self.event_type = self.type
self.meta_type = self.related_obj.meta_type
self.related_object_id = self.related_obj.getId()
# these attributes are going to replace self.related_obj soon.
# we are doing this because keeping a direct reference to an object is not the right thing to do; in many
# cases it leads to bugs that are difficult to debug. ro = related_object.
# self.ro_id = self.related_obj.getId()
# self.ro_uid = self.related_obj.UID()
# self.ro_meta_type = self.related_obj.meta_type
self.catalog_me()
def catalog_me(self):
'''
index an event (through an EventBrain) in event_catalog.
'''
self.id = self.cct.event_catalog_map.new_docid()
self.cct.event_catalog_map.add(EventBrain(self), self.id)
self.cct.event_catalog.index_doc(self.id, self)
def get_contextualized_object(self):
'''
Used to work around objects for which getPhysicalPath doesn't work
properly. Returns a catalog brain object.
'''
uid = self.related_obj.UID()
portal_catalog = self.portal.portal_catalog
try:
return portal_catalog.search(dict(UID=uid))[0]
except IndexError:
return None
def event_url(self):
'''
used to solve a problem in some urls with absolute_url_path().
'''
portal_url = '/'.join(self.portal.getPhysicalPath())
patient_url = portal_url + '/Patients/' + self.patient_id
chart_url = patient_url + '/chartFolder_hidden'
if self.related_obj.meta_type == 'ChartItemEventWrapper':
return chart_url + self.related_obj.url_sufix
else:
contextualized_object = self.get_contextualized_object()
if contextualized_object != None:
return contextualized_object.getPath()
return None
def eprint(self):
'''
returns HTML to be printed on screen
'''
event_url = self.event_url()
if event_url == None:
klass = 'obj_deleted' # related object deleted.
event_url = ''
else:
klass = ''
if self.related_obj.meta_type == 'Visit':
related_obj = "<a class=\"%s\" target=\"_blank\" href=\"%s\"> %s </a>" % (klass, event_url, self.related_obj.getVisit_type())
else:
related_obj = "<a class=\"%s\" target=\"_blank\" href=\"%s\"> %s </a>" % (klass, event_url, self.related_obj.Title())
return self.prefix() + related_obj + self.posfix()
def prefix(self):
'''
called by eprint.
'''
# this import needs to be here (and not at the top of the module), since medicaldocument imports chartdata too.
from wres.archetypes.content.medicaldocument import MedicalDocument
if self.type == Event.CREATION:
if self.related_obj.meta_type == 'Visit':
return ''
elif self.related_obj.meta_type == 'Patient':
return 'Paciente '
elif isinstance(self.related_obj, ChartItemEventWrapper):
return self.related_obj.prefix
elif isinstance(self.related_obj, MedicalDocument):
return 'Documento '
elif self.related_obj.portal_type == 'Image':
return 'Imagem '
elif self.related_obj.portal_type == 'File':
return 'Arquivo '
return ''
def posfix(self):
'''
called by eprint
'''
if self.type == Event.CREATION:
if self.related_obj.meta_type == 'Visit':
return self._visit_review_state()
else:
return ' adicionado.'
def getAuthor(self):
'''
If not admin, return the related object of the self.author.
'''
# admin has neither a member nor a related_object.
if self.author == 'admin':
return 'admin'
mt = getToolByName(self.portal, 'portal_membership')
member = mt.getMemberById(self.author)
return self.portal.unrestrictedTraverse(member.getProperty('related_object'))
def _visit_review_state(self):
'''
used only for visits.
'''
pw = getToolByName(self.portal, 'portal_workflow')
wf = getattr(pw, 'appointment_workflow')
pc = getToolByName(self.portal, 'portal_catalog')
# getId sometimes returned more than one object in this search. That is
# why we switched to UID.
brains = pc.search({'UID': self.related_obj.UID()})
if len(brains) > 1:
raise Exception('I found more than 1 visit with the same id.')
brain = brains[0]
state = getattr(wf.states, brain.review_state)
return ' (' + state.title_or_id().lower() + ').'
def _event_cmp(ev1, ev2):
'''
used for sorting events in patient.py
'''
if ev1.date < ev2.date:
return -1
if ev1.date == ev2.date:
return 0
else:
return 1
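# A usage sketch (an assumption -- the real call site is in patient.py, which is
# not shown here): with a list of Event instances this comparison gives a
# chronological sort under Python 2, e.g.:
#   events.sort(Event._event_cmp)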
def export_dict(self):
'''
function that transform an event instance in a dictionary to be exported.
'''
if isinstance(self.related_obj, ChartItemEventWrapper):
return {'type': self.type, 'date': self.date, 'author': self.author, 'related_obj' : self.related_obj.meta_type,
'mapping_name' : self.related_obj.mapping_name, 'prefix' : self.related_obj.prefix,
'title' : self.related_obj.title, 'url_sufix' : self.related_obj.url_sufix,}
else:
return {'type': self.type, 'date': self.date, 'author': self.author, 'related_obj' : self.related_obj.getId()}
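# Hedged illustration of the returned mapping for a plain related object (all
# values are hypothetical):
#   {'type': 1000, 'date': DateTime('2013/01/01'), 'author': 'jdoe',
#    'related_obj': 'visit-1'}
# For a ChartItemEventWrapper the extra mapping_name/prefix/title/url_sufix keys
# are included, presumably so the wrapper can be rebuilt on import.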
class ChartItemEventWrapper:
'''
wrapper for creating chart_data events.
'''
meta_type = 'ChartItemEventWrapper'
def __init__(self, mapping_name, patient, **object):
if mapping_name == 'medications':
self.prefix = 'Medicamento '
self.title = object['medication']
self.url_sufix = '/show_medications'
elif mapping_name == 'problems':
self.prefix = 'Diagnóstico '
self.title = object['problem']
self.url_sufix = '/show_problem_list'
elif mapping_name == 'allergies':
self.prefix = 'Alergia '
self.title = object['allergy']
self.url_sufix = '/show_allergies'
elif mapping_name == 'laboratory':
self.prefix = 'Exame '
self.title = object['exam']
self.url_sufix = '/show_exams'
elif mapping_name == 'prescriptions':
self.prefix = ''
self.title = 'Prescrição'
self.url_sufix = '/show_medications'
self.mapping_name = mapping_name
self.patient = patient
self.id = self.patient.getId() + '_' + mapping_name + '_' + self.title
def getId(self):
return self.id
def getOwner(self):
por | def Title(self):
return self.title
# TODO: Remove starting 01/04/2013
# def absolute_url_path(self):
# chart_folder = self.patient.chartFolder
# return chart_folder.absolute_url_path() + self.url_sufix
class ChartData(Persistent):
__allow_access_to_unprotected_subobjects__ = 1
#TODO some attributes are not being used. Clean up later.
mapping = {
'allergies': OOBTree,
'not_signed_allergies': OOBTree,
'medications': OOBTree,
'review_of_systems': OOBTree,
'problems': OOBTree,
'prescriptions': OOBTree,
'events': OOBTree,
'laboratory': OOBTree,
}
security = ClassSecurityInfo()
def __init__(self):
self.clean_chart()
def clean_chart(self):
mapping = self.mapping
for key, value in mapping.items():
| tal = getSite()
return getToolByName(portal, 'portal_membership').getAuthenticatedMember()
| identifier_body |
chartdata.py | __allow_access_to_unprotected_subobjects__ = 1
'''
EVENT TYPES
'''
# EVENT TYPES
CREATION = 1000
# set as attributes for EventBrain instances.
metadata = ['date_year', 'date_month', 'date_day', 'related_object_id',
'path', 'patient_id', 'id', 'event_type', 'meta_type']
def __init__(self, patient, ev_type, date, related_obj, author=None):
'''
author param is used only in migration of ChartItemEventWrapper's
'''
self.portal = getSite()
self.cct = getToolByName(self.portal, 'cmed_catalog_tool')
self.patient_id = patient.getId()
self.date = date
self.related_obj = related_obj
# the author keyword param is set only when importing ChartDataItemWrapper's
if author:
self.author = author
else:
self.author = related_obj.getOwner().getId()
self.type = ev_type
# indexes and metadata
# self.event_text = self.eprint() # commented out: this is problematic when migrating, since the object isn't in the catalog yet. (ATBlob early creation)
self.date_year = date.year()
self.date_month = date.month()
self.date_day = date.day()
self.path = self.event_url()
self.event_type = self.type
self.meta_type = self.related_obj.meta_type
self.related_object_id = self.related_obj.getId()
# these attributes are going to replace self.related_obj soon.
# we are doing this because keeping a direct reference to an object is not the right thing to do; in many
# cases it leads to bugs that are difficult to debug. ro = related_object.
# self.ro_id = self.related_obj.getId()
# self.ro_uid = self.related_obj.UID()
# self.ro_meta_type = self.related_obj.meta_type
self.catalog_me()
def catalog_me(self):
'''
index an event (through an EventBrain) in event_catalog.
'''
self.id = self.cct.event_catalog_map.new_docid()
self.cct.event_catalog_map.add(EventBrain(self), self.id)
self.cct.event_catalog.index_doc(self.id, self)
def get_contextualized_object(self):
'''
Used to work around objects for which getPhysicalPath doesn't work
properly. Returns a catalog brain object.
'''
uid = self.related_obj.UID()
portal_catalog = self.portal.portal_catalog
try:
return portal_catalog.search(dict(UID=uid))[0]
except IndexError:
return None
def event_url(self):
'''
used to solve a problem in some urls with absolute_url_path().
'''
portal_url = '/'.join(self.portal.getPhysicalPath())
patient_url = portal_url + '/Patients/' + self.patient_id
chart_url = patient_url + '/chartFolder_hidden'
if self.related_obj.meta_type == 'ChartItemEventWrapper':
return chart_url + self.related_obj.url_sufix
else:
contextualized_object = self.get_contextualized_object()
if contextualized_object != None:
return contextualized_object.getPath()
return None
def eprint(self):
'''
returns HTML to be printed on screen
'''
event_url = self.event_url()
if event_url == None:
klass = 'obj_deleted' # related object deleted.
event_url = ''
else:
klass = ''
if self.related_obj.meta_type == 'Visit':
related_obj = "<a class=\"%s\" target=\"_blank\" href=\"%s\"> %s </a>" % (klass, event_url, self.related_obj.getVisit_type())
else:
related_obj = "<a class=\"%s\" target=\"_blank\" href=\"%s\"> %s </a>" % (klass, event_url, self.related_obj.Title())
return self.prefix() + related_obj + self.posfix()
def prefix(self):
'''
called by eprint.
'''
# this import needs to be here (and not at the top of the module), since medicaldocument imports chartdata too.
from wres.archetypes.content.medicaldocument import MedicalDocument
if self.type == Event.CREATION:
if self.related_obj.meta_type == 'Visit':
return ''
elif self.related_obj.meta_type == 'Patient':
return 'Paciente '
elif isinstance(self.related_obj, ChartItemEventWrapper):
return self.related_obj.prefix
elif isinstance(self.related_obj, MedicalDocument):
|
elif self.related_obj.portal_type == 'Image':
return 'Imagem '
elif self.related_obj.portal_type == 'File':
return 'Arquivo '
return ''
def posfix(self):
'''
called by eprint
'''
if self.type == Event.CREATION:
if self.related_obj.meta_type == 'Visit':
return self._visit_review_state()
else:
return ' adicionado.'
def getAuthor(self):
'''
If not admin, return the related object of the self.author.
'''
# admin has neither a member nor a related_object.
if self.author == 'admin':
return 'admin'
mt = getToolByName(self.portal, 'portal_membership')
member = mt.getMemberById(self.author)
return self.portal.unrestrictedTraverse(member.getProperty('related_object'))
def _visit_review_state(self):
'''
used only for visits.
'''
pw = getToolByName(self.portal, 'portal_workflow')
wf = getattr(pw, 'appointment_workflow')
pc = getToolByName(self.portal, 'portal_catalog')
# getId sometimes returned more than one object in this search. That is
# why we switched to UID.
brains = pc.search({'UID': self.related_obj.UID()})
if len(brains) > 1:
raise Exception('I found more than 1 visit with the same id.')
brain = brains[0]
state = getattr(wf.states, brain.review_state)
return ' (' + state.title_or_id().lower() + ').'
def _event_cmp(ev1, ev2):
'''
used for sorting events in patient.py
'''
if ev1.date < ev2.date:
return -1
if ev1.date == ev2.date:
return 0
else:
return 1
def export_dict(self):
'''
function that transform an event instance in a dictionary to be exported.
'''
if isinstance(self.related_obj, ChartItemEventWrapper):
return {'type': self.type, 'date': self.date, 'author': self.author, 'related_obj' : self.related_obj.meta_type,
'mapping_name' : self.related_obj.mapping_name, 'prefix' : self.related_obj.prefix,
'title' : self.related_obj.title, 'url_sufix' : self.related_obj.url_sufix,}
else:
return {'type': self.type, 'date': self.date, 'author': self.author, 'related_obj' : self.related_obj.getId()}
class ChartItemEventWrapper:
'''
wrapper for creating chart_data events.
'''
meta_type = 'ChartItemEventWrapper'
def __init__(self, mapping_name, patient, **object):
if mapping_name == 'medications':
self.prefix = 'Medicamento '
self.title = object['medication']
self.url_sufix = '/show_medications'
elif mapping_name == 'problems':
self.prefix = 'Diagnóstico '
self.title = object['problem']
self.url_sufix = '/show_problem_list'
elif mapping_name == 'allergies':
self.prefix = 'Alergia '
self.title = object['allergy']
self.url_sufix = '/show_allergies'
elif mapping_name == 'laboratory':
self.prefix = 'Exame '
self.title = object['exam']
self.url_sufix = '/show_exams'
elif mapping_name == 'prescriptions':
self.prefix = ''
self.title = 'Prescrição'
self.url_sufix = '/show_medications'
self.mapping_name = mapping_name
self.patient = patient
self.id = self.patient.getId() + '_' + mapping_name + '_' + self.title
def getId(self):
return self.id
def getOwner(self):
portal = getSite()
return getToolByName(portal, 'portal_membership').getAuthenticatedMember()
def Title(self):
return self.title
# TODO: Remove starting 01/04/2013
# def absolute_url_path(self):
# chart_folder = self.patient.chartFolder
# return chart_folder.absolute_url_path() + self.url_sufix
class ChartData(Persistent):
__allow_access_to_unprotected_subobjects__ = 1
#TODO some attributes are not being used. Clean up later.
mapping = {
'allergies': OOBTree,
'not_signed_allergies': OOBTree,
'medications': OOBTree,
'review_of_systems': OOBTree,
'problems': OOBTree,
'prescriptions': OOBTree,
'events': OOBTree,
'laboratory': OOBTree,
}
security = ClassSecurityInfo()
def __init__(self):
self.clean_chart()
def clean_chart(self):
mapping = self.mapping
for key, value in mapping.items():
| return 'Documento ' | conditional_block |
chartdata.py | __allow_access_to_unprotected_subobjects__ = 1
'''
EVENT TYPES
'''
# EVENT TYPES
CREATION = 1000
# set as attributes for EventBrain instances.
metadata = ['date_year', 'date_month', 'date_day', 'related_object_id',
'path', 'patient_id', 'id', 'event_type', 'meta_type']
def __init__(self, patient, ev_type, date, related_obj, author=None):
'''
author param is used only in migration of ChartItemEventWrapper's
'''
self.portal = getSite()
self.cct = getToolByName(self.portal, 'cmed_catalog_tool')
self.patient_id = patient.getId()
self.date = date
self.related_obj = related_obj
# the author keyword param is set only when importing ChartDataItemWrapper's
if author:
self.author = author
else:
self.author = related_obj.getOwner().getId()
self.type = ev_type
# indexes and metadata
# self.event_text = self.eprint() # commented out: this is problematic when migrating, since the object isn't in the catalog yet. (ATBlob early creation)
self.date_year = date.year()
self.date_month = date.month()
self.date_day = date.day()
self.path = self.event_url()
self.event_type = self.type
self.meta_type = self.related_obj.meta_type
self.related_object_id = self.related_obj.getId()
# these attributes are going to replace self.related_obj soon.
# we are doing this because keeping a direct reference to an object is not the right thing to do; in many
# cases it leads to bugs that are difficult to debug. ro = related_object.
# self.ro_id = self.related_obj.getId()
# self.ro_uid = self.related_obj.UID()
# self.ro_meta_type = self.related_obj.meta_type
self.catalog_me()
def catalog_me(self):
'''
index an event (through an EventBrain) in event_catalog.
'''
self.id = self.cct.event_catalog_map.new_docid()
self.cct.event_catalog_map.add(EventBrain(self), self.id)
self.cct.event_catalog.index_doc(self.id, self)
def get_contextualized_object(self):
'''
Used to work around objects for which getPhysicalPath doesn't work
properly. Returns a catalog brain object.
'''
uid = self.related_obj.UID()
portal_catalog = self.portal.portal_catalog
try:
return portal_catalog.search(dict(UID=uid))[0]
except IndexError:
return None
def event_url(self):
'''
used to solve a problem in some urls with absolute_url_path().
'''
portal_url = '/'.join(self.portal.getPhysicalPath())
patient_url = portal_url + '/Patients/' + self.patient_id
chart_url = patient_url + '/chartFolder_hidden'
if self.related_obj.meta_type == 'ChartItemEventWrapper':
return chart_url + self.related_obj.url_sufix
else:
contextualized_object = self.get_contextualized_object()
if contextualized_object != None:
return contextualized_object.getPath()
return None
def eprint(self):
'''
returns HTML to be printed on screen
'''
event_url = self.event_url()
if event_url == None:
klass = 'obj_deleted' # related object deleted.
event_url = ''
else:
klass = ''
if self.related_obj.meta_type == 'Visit':
related_obj = "<a class=\"%s\" target=\"_blank\" href=\"%s\"> %s </a>" % (klass, event_url, self.related_obj.getVisit_type())
else:
related_obj = "<a class=\"%s\" target=\"_blank\" href=\"%s\"> %s </a>" % (klass, event_url, self.related_obj.Title())
return self.prefix() + related_obj + self.posfix()
def prefix(self):
'''
called by eprint.
'''
# this import needs to be here (and not at the top of the module), since medicaldocument imports chartdata too.
from wres.archetypes.content.medicaldocument import MedicalDocument
if self.type == Event.CREATION:
if self.related_obj.meta_type == 'Visit':
return ''
elif self.related_obj.meta_type == 'Patient':
return 'Paciente '
elif isinstance(self.related_obj, ChartItemEventWrapper):
return self.related_obj.prefix
elif isinstance(self.related_obj, MedicalDocument):
return 'Documento '
elif self.related_obj.portal_type == 'Image': | def posfix(self):
'''
called by eprint
'''
if self.type == Event.CREATION:
if self.related_obj.meta_type == 'Visit':
return self._visit_review_state()
else:
return ' adicionado.'
def getAuthor(self):
'''
If not admin, return the related object of the self.author.
'''
# admin has neither a member nor a related_object.
if self.author == 'admin':
return 'admin'
mt = getToolByName(self.portal, 'portal_membership')
member = mt.getMemberById(self.author)
return self.portal.unrestrictedTraverse(member.getProperty('related_object'))
def _visit_review_state(self):
'''
used only for visits.
'''
pw = getToolByName(self.portal, 'portal_workflow')
wf = getattr(pw, 'appointment_workflow')
pc = getToolByName(self.portal, 'portal_catalog')
# getId sometimes returned more than one object in this search. That is
# why we switched to UID.
brains = pc.search({'UID': self.related_obj.UID()})
if len(brains) > 1:
raise Exception('I found more than 1 visit with the same id.')
brain = brains[0]
state = getattr(wf.states, brain.review_state)
return ' (' + state.title_or_id().lower() + ').'
def _event_cmp(ev1, ev2):
'''
used for sorting events in patient.py
'''
if ev1.date < ev2.date:
return -1
if ev1.date == ev2.date:
return 0
else:
return 1
def export_dict(self):
'''
function that transform an event instance in a dictionary to be exported.
'''
if isinstance(self.related_obj, ChartItemEventWrapper):
return {'type': self.type, 'date': self.date, 'author': self.author, 'related_obj' : self.related_obj.meta_type,
'mapping_name' : self.related_obj.mapping_name, 'prefix' : self.related_obj.prefix,
'title' : self.related_obj.title, 'url_sufix' : self.related_obj.url_sufix,}
else:
return {'type': self.type, 'date': self.date, 'author': self.author, 'related_obj' : self.related_obj.getId()}
class ChartItemEventWrapper:
'''
wrapper for creating chart_data events.
'''
meta_type = 'ChartItemEventWrapper'
def __init__(self, mapping_name, patient, **object):
if mapping_name == 'medications':
self.prefix = 'Medicamento '
self.title = object['medication']
self.url_sufix = '/show_medications'
elif mapping_name == 'problems':
self.prefix = 'Diagnóstico '
self.title = object['problem']
self.url_sufix = '/show_problem_list'
elif mapping_name == 'allergies':
self.prefix = 'Alergia '
self.title = object['allergy']
self.url_sufix = '/show_allergies'
elif mapping_name == 'laboratory':
self.prefix = 'Exame '
self.title = object['exam']
self.url_sufix = '/show_exams'
elif mapping_name == 'prescriptions':
self.prefix = ''
self.title = 'Prescrição'
self.url_sufix = '/show_medications'
self.mapping_name = mapping_name
self.patient = patient
self.id = self.patient.getId() + '_' + mapping_name + '_' + self.title
def getId(self):
return self.id
def getOwner(self):
portal = getSite()
return getToolByName(portal, 'portal_membership').getAuthenticatedMember()
def Title(self):
return self.title
# TODO: Remove starting 01/04/2013
# def absolute_url_path(self):
# chart_folder = self.patient.chartFolder
# return chart_folder.absolute_url_path() + self.url_sufix
class ChartData(Persistent):
__allow_access_to_unprotected_subobjects__ = 1
#TODO some attributes are not being used. Clean up later.
mapping = {
'allergies': OOBTree,
'not_signed_allergies': OOBTree,
'medications': OOBTree,
'review_of_systems': OOBTree,
'problems': OOBTree,
'prescriptions': OOBTree,
'events': OOBTree,
'laboratory': OOBTree,
}
security = ClassSecurityInfo()
def __init__(self):
self.clean_chart()
def clean_chart(self):
mapping = self.mapping
for key, value in mapping.items():
setattr | return 'Imagem '
elif self.related_obj.portal_type == 'File':
return 'Arquivo '
return ''
| random_line_split |
chartdata.py | __allow_access_to_unprotected_subobjects__ = 1
'''
EVENT TYPES
'''
# EVENT TYPES
CREATION = 1000
# set as attributes for EventBrain instances.
metadata = ['date_year', 'date_month', 'date_day', 'related_object_id',
'path', 'patient_id', 'id', 'event_type', 'meta_type']
def __init__(self, patient, ev_type, date, related_obj, author=None):
'''
author param is used only in migration of ChartItemEventWrapper's
'''
self.portal = getSite()
self.cct = getToolByName(self.portal, 'cmed_catalog_tool')
self.patient_id = patient.getId()
self.date = date
self.related_obj = related_obj
# the author keyword param is set only when importing ChartDataItemWrapper's
if author:
self.author = author
else:
self.author = related_obj.getOwner().getId()
self.type = ev_type
# indexes and metadata
# self.event_text = self.eprint() # commented out: this is problematic when migrating, since the object isn't in the catalog yet. (ATBlob early creation)
self.date_year = date.year()
self.date_month = date.month()
self.date_day = date.day()
self.path = self.event_url()
self.event_type = self.type
self.meta_type = self.related_obj.meta_type
self.related_object_id = self.related_obj.getId()
# these attributes are going to replace self.related_obj soon.
# we are doing this because keeping a direct reference to an object is not the right thing to do; in many
# cases it leads to bugs that are difficult to debug. ro = related_object.
# self.ro_id = self.related_obj.getId()
# self.ro_uid = self.related_obj.UID()
# self.ro_meta_type = self.related_obj.meta_type
self.catalog_me()
def catalog_me(self):
'''
index an event (through an EventBrain) in event_catalog.
'''
self.id = self.cct.event_catalog_map.new_docid()
self.cct.event_catalog_map.add(EventBrain(self), self.id)
self.cct.event_catalog.index_doc(self.id, self)
def get_contextualized_object(self):
'''
Used to work around objects for which getPhysicalPath doesn't work
properly. Returns a catalog brain object.
'''
uid = self.related_obj.UID()
portal_catalog = self.portal.portal_catalog
try:
return portal_catalog.search(dict(UID=uid))[0]
except IndexError:
return None
def event_url(self):
'''
used to solve a problem in some urls with absolute_url_path().
'''
portal_url = '/'.join(self.portal.getPhysicalPath())
patient_url = portal_url + '/Patients/' + self.patient_id
chart_url = patient_url + '/chartFolder_hidden'
if self.related_obj.meta_type == 'ChartItemEventWrapper':
return chart_url + self.related_obj.url_sufix
else:
contextualized_object = self.get_contextualized_object()
if contextualized_object != None:
return contextualized_object.getPath()
return None
def eprint(self):
'''
returns HTML to be printed on screen
'''
event_url = self.event_url()
if event_url == None:
klass = 'obj_deleted' # related object deleted.
event_url = ''
else:
klass = ''
if self.related_obj.meta_type == 'Visit':
related_obj = "<a class=\"%s\" target=\"_blank\" href=\"%s\"> %s </a>" % (klass, event_url, self.related_obj.getVisit_type())
else:
related_obj = "<a class=\"%s\" target=\"_blank\" href=\"%s\"> %s </a>" % (klass, event_url, self.related_obj.Title())
return self.prefix() + related_obj + self.posfix()
def prefix(self):
'''
called by eprint.
'''
# this import needs to be here (and not at the top of the module), since medicaldocument imports chartdata too.
from wres.archetypes.content.medicaldocument import MedicalDocument
if self.type == Event.CREATION:
if self.related_obj.meta_type == 'Visit':
return ''
elif self.related_obj.meta_type == 'Patient':
return 'Paciente '
elif isinstance(self.related_obj, ChartItemEventWrapper):
return self.related_obj.prefix
elif isinstance(self.related_obj, MedicalDocument):
return 'Documento '
elif self.related_obj.portal_type == 'Image':
return 'Imagem '
elif self.related_obj.portal_type == 'File':
return 'Arquivo '
return ''
def posfix(self):
'''
called by eprint
'''
if self.type == Event.CREATION:
if self.related_obj.meta_type == 'Visit':
return self._visit_review_state()
else:
return ' adicionado.'
def getAuthor(self):
'''
If not admin, return the related object of the self.author.
'''
# admin has neither a member nor a related_object.
if self.author == 'admin':
return 'admin'
mt = getToolByName(self.portal, 'portal_membership')
member = mt.getMemberById(self.author)
return self.portal.unrestrictedTraverse(member.getProperty('related_object'))
def _visit_review_state(self):
'''
used only for visits.
'''
pw = getToolByName(self.portal, 'portal_workflow')
wf = getattr(pw, 'appointment_workflow')
pc = getToolByName(self.portal, 'portal_catalog')
# getId sometimes returned more than one object in this search. That is
# why we switched to UID.
brains = pc.search({'UID': self.related_obj.UID()})
if len(brains) > 1:
raise Exception('I found more than 1 visit with the same id.')
brain = brains[0]
state = getattr(wf.states, brain.review_state)
return ' (' + state.title_or_id().lower() + ').'
def _event_cmp(ev1, ev2):
'''
used for sorting events in patient.py
'''
if ev1.date < ev2.date:
return -1
if ev1.date == ev2.date:
return 0
else:
return 1
def | (self):
'''
function that transform an event instance in a dictionary to be exported.
'''
if isinstance(self.related_obj, ChartItemEventWrapper):
return {'type': self.type, 'date': self.date, 'author': self.author, 'related_obj' : self.related_obj.meta_type,
'mapping_name' : self.related_obj.mapping_name, 'prefix' : self.related_obj.prefix,
'title' : self.related_obj.title, 'url_sufix' : self.related_obj.url_sufix,}
else:
return {'type': self.type, 'date': self.date, 'author': self.author, 'related_obj' : self.related_obj.getId()}
class ChartItemEventWrapper:
'''
wrapper for creating chart_data events.
'''
meta_type = 'ChartItemEventWrapper'
def __init__(self, mapping_name, patient, **object):
if mapping_name == 'medications':
self.prefix = 'Medicamento '
self.title = object['medication']
self.url_sufix = '/show_medications'
elif mapping_name == 'problems':
self.prefix = 'Diagnóstico '
self.title = object['problem']
self.url_sufix = '/show_problem_list'
elif mapping_name == 'allergies':
self.prefix = 'Alergia '
self.title = object['allergy']
self.url_sufix = '/show_allergies'
elif mapping_name == 'laboratory':
self.prefix = 'Exame '
self.title = object['exam']
self.url_sufix = '/show_exams'
elif mapping_name == 'prescriptions':
self.prefix = ''
self.title = 'Prescrição'
self.url_sufix = '/show_medications'
self.mapping_name = mapping_name
self.patient = patient
self.id = self.patient.getId() + '_' + mapping_name + '_' + self.title
def getId(self):
return self.id
def getOwner(self):
portal = getSite()
return getToolByName(portal, 'portal_membership').getAuthenticatedMember()
def Title(self):
return self.title
# TODO: Remove starting 01/04/2013
# def absolute_url_path(self):
# chart_folder = self.patient.chartFolder
# return chart_folder.absolute_url_path() + self.url_sufix
class ChartData(Persistent):
__allow_access_to_unprotected_subobjects__ = 1
#TODO some attributes are not being used. Clean up later.
mapping = {
'allergies': OOBTree,
'not_signed_allergies': OOBTree,
'medications': OOBTree,
'review_of_systems': OOBTree,
'problems': OOBTree,
'prescriptions': OOBTree,
'events': OOBTree,
'laboratory': OOBTree,
}
security = ClassSecurityInfo()
def __init__(self):
self.clean_chart()
def clean_chart(self):
mapping = self.mapping
for key, value in mapping.items():
| export_dict | identifier_name |
utils.py | hmac
import base64
import MySQLdb
import os
import re
import marshal
import subprocess
from sitescripts.utils import get_config, cached, get_template, anonymizeMail, sendMail
def getReportSubscriptions(guid):
cursor = get_db().cursor(MySQLdb.cursors.DictCursor)
executeQuery(cursor,
'''SELECT url, hasmatches FROM #PFX#sublists INNER JOIN
#PFX#subscriptions ON (#PFX#sublists.list = #PFX#subscriptions.id)
WHERE report = %s''',
guid)
rows = cursor.fetchall()
cursor.close()
return rows
def getReports(startTime):
count = 10000
offset = 0
while True:
cursor = get_db().cursor(MySQLdb.cursors.DictCursor)
executeQuery(cursor,
'''SELECT guid, type, UNIX_TIMESTAMP(ctime) AS ctime, status, site, contact,
comment, hasscreenshot, knownissues
FROM #PFX#reports WHERE ctime >= FROM_UNIXTIME(%s) LIMIT %s OFFSET %s''',
(startTime, count, offset))
rows = cursor.fetchall()
cursor.close()
if len(rows) == 0:
break
for row in rows:
yield row
offset += len(rows)
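# Hedged usage sketch: the generator pages through the table 10000 rows at a
# time, so callers can simply iterate, e.g. over everything reported in the
# last seven days (the handler name is hypothetical):
#   import time
#   for report in getReports(int(time.time()) - 7 * 24 * 3600):
#       handle(report['guid'], report['status'])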
def getReportsForUser(contact):
cursor = get_db().cursor(MySQLdb.cursors.DictCursor)
executeQuery(cursor,
'''SELECT guid, type, UNIX_TIMESTAMP(ctime) AS ctime, status, site, contact,
comment, hasscreenshot, knownissues
FROM #PFX#reports WHERE contact = %s ORDER BY ctime DESC LIMIT 100''',
contact)
rows = cursor.fetchall()
cursor.close()
return rows
def getReport(guid):
cursor = get_db().cursor()
executeQuery(cursor, 'SELECT dump FROM #PFX#reports WHERE guid = %s', guid)
report = cursor.fetchone()
if report == None:
return None
reportData = marshal.loads(report[0])
return reportData
| screenshot = reportData.get('screenshot', None)
if screenshot != None:
reportData['hasscreenshot'] = 2 if reportData.get('screenshotEdited', False) else 1
try:
saveScreenshot(guid, screenshot)
except (TypeError, UnicodeEncodeError):
reportData['hasscreenshot'] = 0
del reportData['screenshot']
knownIssues = len(reportData.get('knownIssues', []))
contact = getUserId(reportData.get('email', None)) if reportData.get('email', None) else None
dumpstr = marshal.dumps(reportData)
if contact != None and isNew:
executeQuery(cursor, 'INSERT INTO #PFX#users (id, reports) VALUES (%s, 1) ON DUPLICATE KEY UPDATE reports = reports + 1', contact)
executeQuery(cursor,
'''INSERT INTO #PFX#reports (guid, type, ctime, site, comment, status, contact, hasscreenshot, knownissues, dump)
VALUES (%(guid)s, %(type)s, FROM_UNIXTIME(%(ctime)s), %(site)s, %(comment)s, %(status)s, %(contact)s,
%(hasscreenshot)s, %(knownissues)s, _binary %(dump)s) ON DUPLICATE KEY
UPDATE type = %(type)s, site = %(site)s, comment = %(comment)s, status = %(status)s,
hasscreenshot = %(hasscreenshot)s, knownissues = %(knownissues)s, dump = _binary %(dump)s''',
{'guid': guid, 'type': reportData.get('type', None), 'ctime': reportData['time'], 'site': reportData.get('siteName', None),
'comment': reportData.get('comment', None), 'status': reportData.get('status', None), 'contact': contact,
'hasscreenshot': reportData.get('hasscreenshot', 0), 'knownissues': knownIssues, 'dump': dumpstr})
if len(reportData['subscriptions']) > 0:
for sn in reportData['subscriptions']:
executeQuery(cursor, 'SELECT id FROM #PFX#subscriptions WHERE url = %s', sn['id'])
id = cursor.fetchone()
if id != None:
def filterMatch(f):
return any(u == sn['id'] for u in f.get('subscriptions', []))
hasMatches = any(filterMatch(f) for f in reportData.get('filters', []))
executeQuery(cursor, 'INSERT IGNORE INTO #PFX#sublists (report, list, hasmatches) VALUES (%s, %s, %s)', (guid, id[0], hasMatches))
get_db().commit()
reportData['guid'] = guid
if contact:
# TODO: The mail anonymization should happen in the template, not here
origEmail = reportData['email']
email = reportData['email']
email = re.sub(r' at ', r'@', email)
email = re.sub(r' dot ', r'.', email)
reportData['email'] = anonymizeMail(email)
reportData['uid'] = contact
file = os.path.join(get_config().get('reports', 'dataPath'), guid[0], guid[1], guid[2], guid[3], guid + '.html')
dir = os.path.dirname(file)
if not os.path.exists(dir):
os.makedirs(dir)
template = get_template(get_config().get('reports', 'webTemplate'))
template.stream(reportData).dump(file, encoding='utf-8')
if contact:
reportData['email'] = origEmail
def removeReport(guid):
cursor = get_db().cursor()
executeQuery(cursor, 'DELETE FROM #PFX#reports WHERE guid = %s', guid)
get_db().commit()
file = os.path.join(get_config().get('reports', 'dataPath'), guid[0], guid[1], guid[2], guid[3], guid + '.html')
if os.path.isfile(file):
os.remove(file)
file = os.path.join(get_config().get('reports', 'dataPath'), guid[0], guid[1], guid[2], guid[3], guid + '.png')
if os.path.isfile(file):
os.remove(file)
def getUser(contact):
cursor = get_db().cursor(MySQLdb.cursors.DictCursor)
executeQuery(cursor, 'SELECT reports, positive, negative FROM #PFX#users WHERE id = %s', contact)
user = cursor.fetchone()
return user
@cached(3600)
def getUserUsefulnessScore(contact):
if contact == None:
return 0
cursor = get_db().cursor()
# formula taken from http://www.evanmiller.org/how-not-to-sort-by-average-rating.html
executeQuery(cursor,
'''SELECT ((positive + 1.9208) / (positive + negative)
- 1.96 * SQRT((positive * negative) / (positive + negative) + 0.9604) / (positive + negative))
/ (1 + 3.8416 / (positive + negative)) AS score FROM #PFX#users WHERE id = %s''',
contact)
score = cursor.fetchone()
if score == None:
return 0
if score[0] == None: # no score yet
return 0.3
else:
return 4 * score[0]
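# A minimal pure-Python sketch of the SQL above (an illustration, not part of
# the original module): it is the Wilson score lower bound at z = 1.96, where
# z^2 = 3.8416, z^2 / 2 = 1.9208 and z^2 / 4 = 0.9604.
from math import sqrt
def wilson_lower_bound(positive, negative, z=1.96):
    n = positive + negative
    if n == 0:
        return None  # mirrors the "no score yet" case above
    phat = float(positive) / n
    return (phat + z * z / (2 * n) -
            z * sqrt((phat * (1 - phat) + z * z / (4 * n)) / n)) / (1 + z * z / n)
# getUserUsefulnessScore scales this lower bound by 4 before returning it.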
def updateUserUsefulness(contact, newusefulness, oldusefulness):
new = int(newusefulness)
old = int(oldusefulness)
if new == old:
return
positive = 0
negative = 0
if old > 0:
positive -= 1
elif old < 0:
negative -= 1
if new > 0:
positive += 1
elif new < 0:
negative += 1
cursor = get_db().cursor()
executeQuery(cursor, 'UPDATE #PFX#users SET negative = negative + %s, positive = positive + %s WHERE id = %s', (negative, positive, contact))
get_db().commit()
def saveScreenshot(guid, screenshot):
prefix = 'data:image/png;base64,'
if not screenshot.startswith(prefix):
raise TypeError('Screenshot is not a PNG image')
data = base64.b64decode(screenshot[len(prefix):])
file = os.path.join(get_config().get('reports', 'dataPath'), guid[0], guid[1], guid[2], guid[3], guid + '.png')
dir = os.path.dirname(file)
if not os.path.exists(dir):
os.makedirs(dir)
f = open(file, 'wb')
f.write(data)
f.close()
if get_config().has_option('reports', 'pngOptimizerPath'):
cmd = get_config().get('reports', 'pngOptimizerPath').split()
cmd.append(file)
subprocess.call(cmd)
def mailDigest(templateData):
sendMail(get_config().get('reports', 'mailDigestTemplate'), templateData)
def sendUpdateNotification(templateData):
sendMail(get_config().get('reports', 'notificationTemplate'), templateData)
def calculateReportSecret(guid):
return hmac.new(get_config().get('reports', 'secret'), guid).hexdigest()
def calculateReportSecret_compat(guid):
hash = hashlib |
def saveReport(guid, reportData, isNew=False):
cursor = get_db().cursor() | random_line_split |
utils.py | , cached, get_template, anonymizeMail, sendMail
def getReportSubscriptions(guid):
cursor = get_db().cursor(MySQLdb.cursors.DictCursor)
executeQuery(cursor,
'''SELECT url, hasmatches FROM #PFX#sublists INNER JOIN
#PFX#subscriptions ON (#PFX#sublists.list = #PFX#subscriptions.id)
WHERE report = %s''',
guid)
rows = cursor.fetchall()
cursor.close()
return rows
def getReports(startTime):
count = 10000
offset = 0
while True:
cursor = get_db().cursor(MySQLdb.cursors.DictCursor)
executeQuery(cursor,
'''SELECT guid, type, UNIX_TIMESTAMP(ctime) AS ctime, status, site, contact,
comment, hasscreenshot, knownissues
FROM #PFX#reports WHERE ctime >= FROM_UNIXTIME(%s) LIMIT %s OFFSET %s''',
(startTime, count, offset))
rows = cursor.fetchall()
cursor.close()
if len(rows) == 0:
break
for row in rows:
yield row
offset += len(rows)
def getReportsForUser(contact):
cursor = get_db().cursor(MySQLdb.cursors.DictCursor)
executeQuery(cursor,
'''SELECT guid, type, UNIX_TIMESTAMP(ctime) AS ctime, status, site, contact,
comment, hasscreenshot, knownissues
FROM #PFX#reports WHERE contact = %s ORDER BY ctime DESC LIMIT 100''',
contact)
rows = cursor.fetchall()
cursor.close()
return rows
def getReport(guid):
cursor = get_db().cursor()
executeQuery(cursor, 'SELECT dump FROM #PFX#reports WHERE guid = %s', guid)
report = cursor.fetchone()
if report == None:
return None
reportData = marshal.loads(report[0])
return reportData
def saveReport(guid, reportData, isNew=False):
cursor = get_db().cursor()
screenshot = reportData.get('screenshot', None)
if screenshot != None:
reportData['hasscreenshot'] = 2 if reportData.get('screenshotEdited', False) else 1
try:
saveScreenshot(guid, screenshot)
except (TypeError, UnicodeEncodeError):
reportData['hasscreenshot'] = 0
del reportData['screenshot']
knownIssues = len(reportData.get('knownIssues', []))
contact = getUserId(reportData.get('email', None)) if reportData.get('email', None) else None
dumpstr = marshal.dumps(reportData)
if contact != None and isNew:
executeQuery(cursor, 'INSERT INTO #PFX#users (id, reports) VALUES (%s, 1) ON DUPLICATE KEY UPDATE reports = reports + 1', contact)
executeQuery(cursor,
'''INSERT INTO #PFX#reports (guid, type, ctime, site, comment, status, contact, hasscreenshot, knownissues, dump)
VALUES (%(guid)s, %(type)s, FROM_UNIXTIME(%(ctime)s), %(site)s, %(comment)s, %(status)s, %(contact)s,
%(hasscreenshot)s, %(knownissues)s, _binary %(dump)s) ON DUPLICATE KEY
UPDATE type = %(type)s, site = %(site)s, comment = %(comment)s, status = %(status)s,
hasscreenshot = %(hasscreenshot)s, knownissues = %(knownissues)s, dump = _binary %(dump)s''',
{'guid': guid, 'type': reportData.get('type', None), 'ctime': reportData['time'], 'site': reportData.get('siteName', None),
'comment': reportData.get('comment', None), 'status': reportData.get('status', None), 'contact': contact,
'hasscreenshot': reportData.get('hasscreenshot', 0), 'knownissues': knownIssues, 'dump': dumpstr})
if len(reportData['subscriptions']) > 0:
for sn in reportData['subscriptions']:
executeQuery(cursor, 'SELECT id FROM #PFX#subscriptions WHERE url = %s', sn['id'])
id = cursor.fetchone()
if id != None:
def filterMatch(f):
return any(u == sn['id'] for u in f.get('subscriptions', []))
hasMatches = any(filterMatch(f) for f in reportData.get('filters', []))
executeQuery(cursor, 'INSERT IGNORE INTO #PFX#sublists (report, list, hasmatches) VALUES (%s, %s, %s)', (guid, id[0], hasMatches))
get_db().commit()
reportData['guid'] = guid
if contact:
# TODO: The mail anonymization should happen in the template, not here
origEmail = reportData['email']
email = reportData['email']
email = re.sub(r' at ', r'@', email)
email = re.sub(r' dot ', r'.', email)
reportData['email'] = anonymizeMail(email)
reportData['uid'] = contact
file = os.path.join(get_config().get('reports', 'dataPath'), guid[0], guid[1], guid[2], guid[3], guid + '.html')
dir = os.path.dirname(file)
if not os.path.exists(dir):
os.makedirs(dir)
template = get_template(get_config().get('reports', 'webTemplate'))
template.stream(reportData).dump(file, encoding='utf-8')
if contact:
reportData['email'] = origEmail
def removeReport(guid):
cursor = get_db().cursor()
executeQuery(cursor, 'DELETE FROM #PFX#reports WHERE guid = %s', guid)
get_db().commit()
file = os.path.join(get_config().get('reports', 'dataPath'), guid[0], guid[1], guid[2], guid[3], guid + '.html')
if os.path.isfile(file):
os.remove(file)
file = os.path.join(get_config().get('reports', 'dataPath'), guid[0], guid[1], guid[2], guid[3], guid + '.png')
if os.path.isfile(file):
os.remove(file)
def getUser(contact):
cursor = get_db().cursor(MySQLdb.cursors.DictCursor)
executeQuery(cursor, 'SELECT reports, positive, negative FROM #PFX#users WHERE id = %s', contact)
user = cursor.fetchone()
return user
@cached(3600)
def getUserUsefulnessScore(contact):
if contact == None:
return 0
cursor = get_db().cursor()
# formula taken from http://www.evanmiller.org/how-not-to-sort-by-average-rating.html
executeQuery(cursor,
'''SELECT ((positive + 1.9208) / (positive + negative)
- 1.96 * SQRT((positive * negative) / (positive + negative) + 0.9604) / (positive + negative))
/ (1 + 3.8416 / (positive + negative)) AS score FROM #PFX#users WHERE id = %s''',
contact)
score = cursor.fetchone()
if score == None:
return 0
if score[0] == None: # no score yet
return 0.3
else:
return 4 * score[0]
def updateUserUsefulness(contact, newusefulness, oldusefulness):
new = int(newusefulness)
old = int(oldusefulness)
if new == old:
return
positive = 0
negative = 0
if old > 0:
positive -= 1
elif old < 0:
negative -= 1
if new > 0:
positive += 1
elif new < 0:
negative += 1
cursor = get_db().cursor()
executeQuery(cursor, 'UPDATE #PFX#users SET negative = negative + %s, positive = positive + %s WHERE id = %s', (negative, positive, contact))
get_db().commit()
def saveScreenshot(guid, screenshot):
prefix = 'data:image/png;base64,'
if not screenshot.startswith(prefix):
raise TypeError('Screenshot is not a PNG image')
data = base64.b64decode(screenshot[len(prefix):])
file = os.path.join(get_config().get('reports', 'dataPath'), guid[0], guid[1], guid[2], guid[3], guid + '.png')
dir = os.path.dirname(file)
if not os.path.exists(dir):
os.makedirs(dir)
f = open(file, 'wb')
f.write(data)
f.close()
if get_config().has_option('reports', 'pngOptimizerPath'):
cmd = get_config().get('reports', 'pngOptimizerPath').split()
cmd.append(file)
subprocess.call(cmd)
def mailDigest(templateData):
sendMail(get_config().get('reports', 'mailDigestTemplate'), templateData)
def sendUpdateNotification(templateData):
sendMail(get_config().get('reports', 'notificationTemplate'), templateData)
def calculateReportSecret(guid):
return hmac.new(get_config().get('reports', 'secret'), guid).hexdigest()
def calculateReportSecret_compat(guid):
hash = hashlib.md5()
hash.update(get_config().get('reports', 'secret'))
hash.update(guid)
return hash.hexdigest()
def | getUserId | identifier_name |
|
utils.py | PFX#reports WHERE ctime >= FROM_UNIXTIME(%s) LIMIT %s OFFSET %s''',
(startTime, count, offset))
rows = cursor.fetchall()
cursor.close()
if len(rows) == 0:
break
for row in rows:
yield row
offset += len(rows)
def getReportsForUser(contact):
cursor = get_db().cursor(MySQLdb.cursors.DictCursor)
executeQuery(cursor,
'''SELECT guid, type, UNIX_TIMESTAMP(ctime) AS ctime, status, site, contact,
comment, hasscreenshot, knownissues
FROM #PFX#reports WHERE contact = %s ORDER BY ctime DESC LIMIT 100''',
contact)
rows = cursor.fetchall()
cursor.close()
return rows
def getReport(guid):
cursor = get_db().cursor()
executeQuery(cursor, 'SELECT dump FROM #PFX#reports WHERE guid = %s', guid)
report = cursor.fetchone()
if report == None:
return None
reportData = marshal.loads(report[0])
return reportData
def saveReport(guid, reportData, isNew=False):
cursor = get_db().cursor()
screenshot = reportData.get('screenshot', None)
if screenshot != None:
reportData['hasscreenshot'] = 2 if reportData.get('screenshotEdited', False) else 1
try:
saveScreenshot(guid, screenshot)
except (TypeError, UnicodeEncodeError):
reportData['hasscreenshot'] = 0
del reportData['screenshot']
knownIssues = len(reportData.get('knownIssues', []))
contact = getUserId(reportData.get('email', None)) if reportData.get('email', None) else None
dumpstr = marshal.dumps(reportData)
if contact != None and isNew:
executeQuery(cursor, 'INSERT INTO #PFX#users (id, reports) VALUES (%s, 1) ON DUPLICATE KEY UPDATE reports = reports + 1', contact)
executeQuery(cursor,
'''INSERT INTO #PFX#reports (guid, type, ctime, site, comment, status, contact, hasscreenshot, knownissues, dump)
VALUES (%(guid)s, %(type)s, FROM_UNIXTIME(%(ctime)s), %(site)s, %(comment)s, %(status)s, %(contact)s,
%(hasscreenshot)s, %(knownissues)s, _binary %(dump)s) ON DUPLICATE KEY
UPDATE type = %(type)s, site = %(site)s, comment = %(comment)s, status = %(status)s,
hasscreenshot = %(hasscreenshot)s, knownissues = %(knownissues)s, dump = _binary %(dump)s''',
{'guid': guid, 'type': reportData.get('type', None), 'ctime': reportData['time'], 'site': reportData.get('siteName', None),
'comment': reportData.get('comment', None), 'status': reportData.get('status', None), 'contact': contact,
'hasscreenshot': reportData.get('hasscreenshot', 0), 'knownissues': knownIssues, 'dump': dumpstr})
if len(reportData['subscriptions']) > 0:
for sn in reportData['subscriptions']:
executeQuery(cursor, 'SELECT id FROM #PFX#subscriptions WHERE url = %s', sn['id'])
id = cursor.fetchone()
if id != None:
def filterMatch(f):
return any(u == sn['id'] for u in f.get('subscriptions', []))
hasMatches = any(filterMatch(f) for f in reportData.get('filters', []))
executeQuery(cursor, 'INSERT IGNORE INTO #PFX#sublists (report, list, hasmatches) VALUES (%s, %s, %s)', (guid, id[0], hasMatches))
get_db().commit()
reportData['guid'] = guid
if contact:
# TODO: The mail anonymization should happen in the template, not here
origEmail = reportData['email']
email = reportData['email']
email = re.sub(r' at ', r'@', email)
email = re.sub(r' dot ', r'.', email)
reportData['email'] = anonymizeMail(email)
reportData['uid'] = contact
file = os.path.join(get_config().get('reports', 'dataPath'), guid[0], guid[1], guid[2], guid[3], guid + '.html')
dir = os.path.dirname(file)
if not os.path.exists(dir):
os.makedirs(dir)
template = get_template(get_config().get('reports', 'webTemplate'))
template.stream(reportData).dump(file, encoding='utf-8')
if contact:
reportData['email'] = origEmail
def removeReport(guid):
cursor = get_db().cursor()
executeQuery(cursor, 'DELETE FROM #PFX#reports WHERE guid = %s', guid)
get_db().commit()
file = os.path.join(get_config().get('reports', 'dataPath'), guid[0], guid[1], guid[2], guid[3], guid + '.html')
if os.path.isfile(file):
os.remove(file)
file = os.path.join(get_config().get('reports', 'dataPath'), guid[0], guid[1], guid[2], guid[3], guid + '.png')
if os.path.isfile(file):
os.remove(file)
def getUser(contact):
cursor = get_db().cursor(MySQLdb.cursors.DictCursor)
executeQuery(cursor, 'SELECT reports, positive, negative FROM #PFX#users WHERE id = %s', contact)
user = cursor.fetchone()
return user
@cached(3600)
def getUserUsefulnessScore(contact):
if contact == None:
return 0
cursor = get_db().cursor()
# formula taken from http://www.evanmiller.org/how-not-to-sort-by-average-rating.html
executeQuery(cursor,
'''SELECT ((positive + 1.9208) / (positive + negative)
- 1.96 * SQRT((positive * negative) / (positive + negative) + 0.9604) / (positive + negative))
/ (1 + 3.8416 / (positive + negative)) AS score FROM #PFX#users WHERE id = %s''',
contact)
score = cursor.fetchone()
if score == None:
return 0
if score[0] == None: # no score yet
return 0.3
else:
return 4 * score[0]
def updateUserUsefulness(contact, newusefulness, oldusefulness):
new = int(newusefulness)
old = int(oldusefulness)
if new == old:
return
positive = 0
negative = 0
if old > 0:
positive -= 1
elif old < 0:
negative -= 1
if new > 0:
positive += 1
elif new < 0:
negative += 1
cursor = get_db().cursor()
executeQuery(cursor, 'UPDATE #PFX#users SET negative = negative + %s, positive = positive + %s WHERE id = %s', (negative, positive, contact))
get_db().commit()
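# Hedged illustration: only the delta between the old and the new vote is
# stored. Changing a vote from negative (-1) to positive (+1), for instance,
# removes one negative and adds one positive for the reporting user:
#   updateUserUsefulness(contact, 1, -1)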
def saveScreenshot(guid, screenshot):
prefix = 'data:image/png;base64,'
if not screenshot.startswith(prefix):
raise TypeError('Screenshot is not a PNG image')
data = base64.b64decode(screenshot[len(prefix):])
file = os.path.join(get_config().get('reports', 'dataPath'), guid[0], guid[1], guid[2], guid[3], guid + '.png')
dir = os.path.dirname(file)
if not os.path.exists(dir):
os.makedirs(dir)
f = open(file, 'wb')
f.write(data)
f.close()
if get_config().has_option('reports', 'pngOptimizerPath'):
cmd = get_config().get('reports', 'pngOptimizerPath').split()
cmd.append(file)
subprocess.call(cmd)
def mailDigest(templateData):
sendMail(get_config().get('reports', 'mailDigestTemplate'), templateData)
def sendUpdateNotification(templateData):
sendMail(get_config().get('reports', 'notificationTemplate'), templateData)
def calculateReportSecret(guid):
return hmac.new(get_config().get('reports', 'secret'), guid).hexdigest()
def calculateReportSecret_compat(guid):
hash = hashlib.md5()
hash.update(get_config().get('reports', 'secret'))
hash.update(guid)
return hash.hexdigest()
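# Hedged usage sketch (the helper name is not part of this module): a request
# carrying a report secret would typically be verified by recomputing it,
# accepting the legacy MD5 form as well:
#   def checkReportSecret(guid, supplied):
#       return supplied in (calculateReportSecret(guid),
#                           calculateReportSecret_compat(guid))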
def getUserId(email):
return hmac.new(get_config().get('reports', 'secret'), email.encode('utf-8')).hexdigest()
def getDigestId(email):
hash = hashlib.md5()
hash.update(email.encode('utf-8'))
return hash.hexdigest()
def getDigestPath(dir, email):
return os.path.join(dir, getDigestId(email) + '.html')
def getDigestSecret(id, (year, week, weekday)):
mac = hmac.new(get_config().get('reports', 'secret'), id)
mac.update(str(year))
mac.update(str(week))
return mac.hexdigest()
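# Hedged usage sketch: the (year, week, weekday) tuple has the same shape as
# datetime.date.isocalendar(), so a digest secret for the current week could be
# derived roughly like this (illustration only):
#   from datetime import date
#   secret = getDigestSecret(getDigestId(email), date.today().isocalendar())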
def getDigestSecret_compat(id, (year, week, weekday)):
| hash = hashlib.md5()
hash.update(get_config().get('reports', 'secret'))
hash.update(id)
hash.update(str(year))
hash.update(str(week))
return hash.hexdigest() | identifier_body |
|
utils.py | hmac
import base64
import MySQLdb
import os
import re
import marshal
import subprocess
from sitescripts.utils import get_config, cached, get_template, anonymizeMail, sendMail
def getReportSubscriptions(guid):
cursor = get_db().cursor(MySQLdb.cursors.DictCursor)
executeQuery(cursor,
'''SELECT url, hasmatches FROM #PFX#sublists INNER JOIN
#PFX#subscriptions ON (#PFX#sublists.list = #PFX#subscriptions.id)
WHERE report = %s''',
guid)
rows = cursor.fetchall()
cursor.close()
return rows
def getReports(startTime):
count = 10000
offset = 0
while True:
cursor = get_db().cursor(MySQLdb.cursors.DictCursor)
executeQuery(cursor,
'''SELECT guid, type, UNIX_TIMESTAMP(ctime) AS ctime, status, site, contact,
comment, hasscreenshot, knownissues
FROM #PFX#reports WHERE ctime >= FROM_UNIXTIME(%s) LIMIT %s OFFSET %s''',
(startTime, count, offset))
rows = cursor.fetchall()
cursor.close()
if len(rows) == 0:
|
for row in rows:
yield row
offset += len(rows)
def getReportsForUser(contact):
cursor = get_db().cursor(MySQLdb.cursors.DictCursor)
executeQuery(cursor,
'''SELECT guid, type, UNIX_TIMESTAMP(ctime) AS ctime, status, site, contact,
comment, hasscreenshot, knownissues
FROM #PFX#reports WHERE contact = %s ORDER BY ctime DESC LIMIT 100''',
contact)
rows = cursor.fetchall()
cursor.close()
return rows
def getReport(guid):
cursor = get_db().cursor()
executeQuery(cursor, 'SELECT dump FROM #PFX#reports WHERE guid = %s', guid)
report = cursor.fetchone()
if report == None:
return None
reportData = marshal.loads(report[0])
return reportData
def saveReport(guid, reportData, isNew=False):
cursor = get_db().cursor()
screenshot = reportData.get('screenshot', None)
if screenshot != None:
reportData['hasscreenshot'] = 2 if reportData.get('screenshotEdited', False) else 1
try:
saveScreenshot(guid, screenshot)
except (TypeError, UnicodeEncodeError):
reportData['hasscreenshot'] = 0
del reportData['screenshot']
knownIssues = len(reportData.get('knownIssues', []))
contact = getUserId(reportData.get('email', None)) if reportData.get('email', None) else None
dumpstr = marshal.dumps(reportData)
if contact != None and isNew:
executeQuery(cursor, 'INSERT INTO #PFX#users (id, reports) VALUES (%s, 1) ON DUPLICATE KEY UPDATE reports = reports + 1', contact)
executeQuery(cursor,
'''INSERT INTO #PFX#reports (guid, type, ctime, site, comment, status, contact, hasscreenshot, knownissues, dump)
VALUES (%(guid)s, %(type)s, FROM_UNIXTIME(%(ctime)s), %(site)s, %(comment)s, %(status)s, %(contact)s,
%(hasscreenshot)s, %(knownissues)s, _binary %(dump)s) ON DUPLICATE KEY
UPDATE type = %(type)s, site = %(site)s, comment = %(comment)s, status = %(status)s,
hasscreenshot = %(hasscreenshot)s, knownissues = %(knownissues)s, dump = _binary %(dump)s''',
{'guid': guid, 'type': reportData.get('type', None), 'ctime': reportData['time'], 'site': reportData.get('siteName', None),
'comment': reportData.get('comment', None), 'status': reportData.get('status', None), 'contact': contact,
'hasscreenshot': reportData.get('hasscreenshot', 0), 'knownissues': knownIssues, 'dump': dumpstr})
if len(reportData['subscriptions']) > 0:
for sn in reportData['subscriptions']:
executeQuery(cursor, 'SELECT id FROM #PFX#subscriptions WHERE url = %s', sn['id'])
id = cursor.fetchone()
if id != None:
def filterMatch(f):
return any(u == sn['id'] for u in f.get('subscriptions', []))
hasMatches = any(filterMatch(f) for f in reportData.get('filters', []))
executeQuery(cursor, 'INSERT IGNORE INTO #PFX#sublists (report, list, hasmatches) VALUES (%s, %s, %s)', (guid, id[0], hasMatches))
get_db().commit()
reportData['guid'] = guid
if contact:
# TODO: The mail anonymization should happen in the template, not here
origEmail = reportData['email']
email = reportData['email']
email = re.sub(r' at ', r'@', email)
email = re.sub(r' dot ', r'.', email)
reportData['email'] = anonymizeMail(email)
reportData['uid'] = contact
file = os.path.join(get_config().get('reports', 'dataPath'), guid[0], guid[1], guid[2], guid[3], guid + '.html')
dir = os.path.dirname(file)
if not os.path.exists(dir):
os.makedirs(dir)
template = get_template(get_config().get('reports', 'webTemplate'))
template.stream(reportData).dump(file, encoding='utf-8')
if contact:
reportData['email'] = origEmail
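# Note on the path layout above (illustrative): report pages are sharded by the
# first four guid characters, so a guid "abcd1234-..." is written to
#   <dataPath>/a/b/c/d/abcd1234-....html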
def removeReport(guid):
cursor = get_db().cursor()
executeQuery(cursor, 'DELETE FROM #PFX#reports WHERE guid = %s', guid)
get_db().commit()
file = os.path.join(get_config().get('reports', 'dataPath'), guid[0], guid[1], guid[2], guid[3], guid + '.html')
if os.path.isfile(file):
os.remove(file)
file = os.path.join(get_config().get('reports', 'dataPath'), guid[0], guid[1], guid[2], guid[3], guid + '.png')
if os.path.isfile(file):
os.remove(file)
def getUser(contact):
cursor = get_db().cursor(MySQLdb.cursors.DictCursor)
executeQuery(cursor, 'SELECT reports, positive, negative FROM #PFX#users WHERE id = %s', contact)
user = cursor.fetchone()
return user
@cached(3600)
def getUserUsefulnessScore(contact):
if contact == None:
return 0
cursor = get_db().cursor()
# source from http://www.evanmiller.org/how-not-to-sort-by-average-rating.html
executeQuery(cursor,
'''SELECT ((positive + 1.9208) / (positive + negative)
- 1.96 * SQRT((positive * negative) / (positive + negative) + 0.9604) / (positive + negative))
/ (1 + 3.8416 / (positive + negative)) AS score FROM #PFX#users WHERE id = %s''',
contact)
score = cursor.fetchone()
if score == None:
return 0
if score[0] == None: # no score yet
return 0.3
else:
return 4 * score[0]
def updateUserUsefulness(contact, newusefulness, oldusefulness):
new = int(newusefulness)
old = int(oldusefulness)
if new == old:
return
positive = 0
negative = 0
if old > 0:
positive -= 1
elif old < 0:
negative -= 1
if new > 0:
positive += 1
elif new < 0:
negative += 1
cursor = get_db().cursor()
executeQuery(cursor, 'UPDATE #PFX#users SET negative = negative + %s, positive = positive + %s WHERE id = %s', (negative, positive, contact))
get_db().commit()
def saveScreenshot(guid, screenshot):
prefix = 'data:image/png;base64,'
if not screenshot.startswith(prefix):
raise TypeError('Screenshot is not a PNG image')
data = base64.b64decode(screenshot[len(prefix):])
file = os.path.join(get_config().get('reports', 'dataPath'), guid[0], guid[1], guid[2], guid[3], guid + '.png')
dir = os.path.dirname(file)
if not os.path.exists(dir):
os.makedirs(dir)
f = open(file, 'wb')
f.write(data)
f.close()
if get_config().has_option('reports', 'pngOptimizerPath'):
cmd = get_config().get('reports', 'pngOptimizerPath').split()
cmd.append(file)
subprocess.call(cmd)
def mailDigest(templateData):
sendMail(get_config().get('reports', 'mailDigestTemplate'), templateData)
def sendUpdateNotification(templateData):
sendMail(get_config().get('reports', 'notificationTemplate'), templateData)
def calculateReportSecret(guid):
return hmac.new(get_config().get('reports', 'secret'), guid).hexdigest()
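# Illustrative sketch (assumption, not in the original): a handler receiving
# guid and secret from a link can validate the pair like this.
def checkReportSecret(guid, secret):
    return calculateReportSecret(guid) == secret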
def calculateReportSecret_compat(guid):
hash = hashlib.md | break | conditional_block |
linux_abi.go | 2
iocNrshift = 0
iocTypeshift = (iocNrshift + iocNrbits)
iocSizeshift = (iocTypeshift + iocTypebits)
iocDirshift = (iocSizeshift + iocSizebits)
iocWrite = 1
iocRead = 2
// Linux /dev/sev-guest ioctl interface
iocTypeSnpGuestReq = 'S'
iocSnpWithoutNr = ((iocWrite | iocRead) << iocDirshift) |
(iocTypeSnpGuestReq << iocTypeshift) |
// unsafe.Sizeof(snpUserGuestRequest)
(32 << iocSizeshift)
// IocSnpGetReport is the ioctl command for getting an attestation report
IocSnpGetReport = iocSnpWithoutNr | (0x0 << iocNrshift)
// IocSnpGetDerivedKey is the ioctl command for getting a key derived from measured components and
// either the VCEK or VMRK.
IocSnpGetDerivedKey = iocSnpWithoutNr | (0x1 << iocNrshift)
// IocSnpGetExtendedReport is the ioctl command for getting an extended attestation report that includes
// certificate information.
IocSnpGetExtendedReport = iocSnpWithoutNr | (0x2 << iocNrshift)
// The message version for MSG_REPORT_REQ in the SNP API. Specified as 1.
guestMsgVersion = 1
// These numbers are from the uapi header sev_guest.h
snpResportRespSize = 4000
msgReportReqHeaderSize = 0x20
SnpReportRespReportSize = snpResportRespSize - msgReportReqHeaderSize
)
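// Illustrative sketch (not part of this file): these request numbers are meant
// for an ioctl on the guest device. With golang.org/x/sys/unix the call would
// look roughly like the following; the device path is an assumption here.
//
//	fd, err := unix.Open("/dev/sev-guest", unix.O_RDWR, 0)
//	if err != nil {
//		return err
//	}
//	defer unix.Close(fd)
//	_, _, errno := unix.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(IocSnpGetReport), uintptr(abi.Pointer()))
//	if errno != 0 {
//		return errno
//	}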
const (
// EsOk denotes success.
EsOk EsResult = iota
// EsUnsupported denotes that the requested operation is not supported.
EsUnsupported
// EsVmmError denotes that the virtual machine monitor was in an unexpected state.
EsVmmError
// EsDecodeFailed denotes that instruction decoding failed.
EsDecodeFailed
// EsException denotes that the GHCB communication caused an exception.
EsException
// EsRetry is the code for a retry instruction emulation
EsRetry
)
// SevEsErr is an error that interprets SEV-ES guest-host communication results.
type SevEsErr struct {
Result EsResult
}
func (err *SevEsErr) Error() string {
if err.Result == EsUnsupported {
return "requested operation not supported"
}
if err.Result == EsVmmError {
return "unexpected state from the VMM"
}
if err.Result == EsDecodeFailed {
return "instruction decoding failed"
}
if err.Result == EsException {
return "instruction caused exception"
}
if err.Result == EsRetry {
return "retry instruction emulation"
}
return "unknown error"
}
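// isRetryable is an illustrative helper (not in the original file): callers can
// unwrap a *SevEsErr with errors.As and react to the retry case.
func isRetryable(err error) bool {
	var esErr *SevEsErr
	return errors.As(err, &esErr) && esErr.Result == EsRetry
}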
// SnpReportReqABI is Linux's sev-guest ioctl abi for sending a GET_REPORT request. See
// include/uapi/linux/sev-guest.h
type SnpReportReqABI struct {
// ReportData to be included in the report
ReportData [64]uint8
// Vmpl is the SEV-SNP VMPL level to be included in the report.
// The kernel must have access to the corresponding VMPCK.
Vmpl uint32
reserved [28]byte
}
// SnpReportRespABI is Linux's sev-guest ioctl abi for receiving a GET_REPORT response.
// The size is expected to be snpReportRespSize.
type SnpReportRespABI struct {
Status uint32
ReportSize uint32
reserved [0x20 - 8]byte
// Data is the response data, see SEV-SNP spec for the format
Data [SnpReportRespReportSize]uint8
}
// ABI returns the same object since it doesn't need a separate representation across the interface.
func (r *SnpReportReqABI) ABI() BinaryConversion { return r }
// Pointer returns a pointer to the object itself.
func (r *SnpReportReqABI) Pointer() unsafe.Pointer {
return unsafe.Pointer(r)
}
// Finish is a no-op.
func (r *SnpReportReqABI) Finish(_ BinaryConvertible) error { return nil }
// ABI returns the same object since it doesn't need a separate representation across the interface.
func (r *SnpReportRespABI) ABI() BinaryConversion { return r }
// Pointer returns a pointer to the object itself.
func (r *SnpReportRespABI) Pointer() unsafe.Pointer {
return unsafe.Pointer(r)
}
// Finish checks the status of the message and translates it to a Golang error.
func (r *SnpReportRespABI) Finish(_ BinaryConvertible) error {
if r.Status != 0 {
switch r.Status {
case 0x16: // Value from MSG_REPORT_RSP specification in SNP API.
return errors.New("get_report had invalid parameters")
default:
return fmt.Errorf("unknown status: 0x%x", r.Status)
}
}
return nil
}
// SnpDerivedKeyReqABI is the ABI representation of a request to the SEV guest device to derive a
// key from specified information.
type SnpDerivedKeyReqABI struct {
// RootKeySelect is all reserved bits except bit 0 for UseVMRK (1) or UseVCEK (0).
RootKeySelect uint32
reserved uint32
GuestFieldSelect uint64
// Vmpl to mix into the key. Must be greater than or equal to current Vmpl.
Vmpl uint32
// GuestSVN to mix into the key. Must be less than or equal to GuestSVN at launch.
GuestSVN uint32
// TCBVersion to mix into the key. Must be less than or equal to the CommittedTcb.
TCBVersion uint64
}
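// exampleDerivedKeyReq is an illustrative helper (not part of the original
// file): a minimal request that mixes nothing extra into the key and selects
// the VCEK as the root key (RootKeySelect bit 0 left at zero).
func exampleDerivedKeyReq() SnpDerivedKeyReqABI {
	return SnpDerivedKeyReqABI{RootKeySelect: 0, GuestFieldSelect: 0, Vmpl: 0}
}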
// Pointer returns a pointer to the object.
func (r *SnpDerivedKeyReqABI) Pointer() unsafe.Pointer { return unsafe.Pointer(r) }
// Finish is a no-op.
func (r *SnpDerivedKeyReqABI) Finish(BinaryConvertible) error { return nil }
// ABI returns the ABI representation of this object.
func (r *SnpDerivedKeyReqABI) ABI() BinaryConversion { return r }
// SnpDerivedKeyRespABI represents the response to an SnpDerivedKeyReq.
type SnpDerivedKeyRespABI struct {
Status uint32
reserved [0x20 - 4]byte
Data [32]byte
}
// ABI returns the object itself.
func (r *SnpDerivedKeyRespABI) ABI() BinaryConversion { return r }
// Pointer returns a pointer to the object itself.
func (r *SnpDerivedKeyRespABI) Pointer() unsafe.Pointer { return unsafe.Pointer(r) }
// Finish is a no-op.
func (r *SnpDerivedKeyRespABI) Finish(BinaryConvertible) error {
switch r.Status {
case 0:
return nil
case 0x16:
return errors.New("msg_key_req error: invalid parameters")
default:
return fmt.Errorf("msg_key_req unknown status code: 0x%x", r.Status)
}
}
// SnpExtendedReportReqABI is Linux's sev-guest ioctl abi for sending a GET_EXTENDED_REPORT request.
type SnpExtendedReportReqABI struct {
Data SnpReportReqABI
// Where to copy the certificate blob.
CertsAddress unsafe.Pointer
// length of the certificate blob
CertsLength uint32
}
// SnpExtendedReportReq is close to Linux's sev-guest ioctl abi for sending a GET_EXTENDED_REPORT request,
// but uses safer types for the Ioctl interface.
type SnpExtendedReportReq struct {
Data SnpReportReqABI
// Certs receives the certificate blob after the extended report request.
Certs []byte
// CertsLength is the length of the certificate blob.
CertsLength uint32
}
// Pointer returns a pointer to the object itself.
func (r *SnpExtendedReportReqABI) Pointer() unsafe.Pointer {
return unsafe.Pointer(r)
}
// Finish writes back the changed CertsLength value.
func (r *SnpExtendedReportReqABI) Finish(b BinaryConvertible) error {
s, ok := b.(*SnpExtendedReportReq)
if !ok {
return fmt.Errorf("Finish argument is %v. Expects a *SnpExtendedReportReq", reflect.TypeOf(b))
}
s.CertsLength = r.CertsLength
return nil
}
// ABI returns an object that can cross the ABI boundary and copy back changes to the original
// object.
func (r *SnpExtendedReportReq) | () BinaryConversion {
var certsAddress unsafe.Pointer
if len(r.Certs) != 0 {
certsAddress = unsafe.Pointer(&r.Certs[0])
}
return &SnpExtendedReportReqABI{
Data: r.Data,
CertsAddress: certsAddress,
CertsLength: r.CertsLength,
}
}
// SnpUserGuestRequestABI is Linux's sev-guest ioctl abi for issuing a guest message.
type SnpUserGuestRequestABI struct {
GuestMsgVersion uint32
// Request and response structure address.
ReqData unsafe.Pointer
RespData unsafe.Pointer
// | ABI | identifier_name |
linux_abi.go | 2
iocNrshift = 0
iocTypeshift = (iocNrshift + iocNrbits)
iocSizeshift = (iocTypeshift + iocTypebits)
iocDirshift = (iocSizeshift + iocSizebits)
iocWrite = 1
iocRead = 2
// Linux /dev/sev-guest ioctl interface
iocTypeSnpGuestReq = 'S'
iocSnpWithoutNr = ((iocWrite | iocRead) << iocDirshift) |
(iocTypeSnpGuestReq << iocTypeshift) |
// unsafe.Sizeof(snpUserGuestRequest)
(32 << iocSizeshift)
// IocSnpGetReport is the ioctl command for getting an attestation report
IocSnpGetReport = iocSnpWithoutNr | (0x0 << iocNrshift)
// IocSnpGetDerivedKey is the ioctl command for getting a key derived from measured components and
// either the VCEK or VMRK.
IocSnpGetDerivedKey = iocSnpWithoutNr | (0x1 << iocNrshift)
// IocSnpGetExtendedReport is the ioctl command for getting an extended attestation report that includes
// certificate information.
IocSnpGetExtendedReport = iocSnpWithoutNr | (0x2 << iocNrshift)
// The message version for MSG_REPORT_REQ in the SNP API. Specified as 1.
guestMsgVersion = 1
// These numbers are from the uapi header sev_guest.h
snpResportRespSize = 4000
msgReportReqHeaderSize = 0x20
SnpReportRespReportSize = snpResportRespSize - msgReportReqHeaderSize
)
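// Quick check of the sizes above (illustrative): the 4000-byte response buffer
// minus the 0x20 (32) byte message header leaves 3968 bytes for the report
// payload, i.e. SnpReportRespReportSize == 3968.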
const (
// EsOk denotes success.
EsOk EsResult = iota
// EsUnsupported denotes that the requested operation is not supported.
EsUnsupported
// EsVmmError denotes that the virtual machine monitor was in an unexpected state.
EsVmmError
// EsDecodeFailed denotes that instruction decoding failed.
EsDecodeFailed
// EsException denotes that the GHCB communication caused an exception. | // EsRetry is the code for a retry instruction emulation
EsRetry
)
// SevEsErr is an error that interprets SEV-ES guest-host communication results.
type SevEsErr struct {
Result EsResult
}
func (err *SevEsErr) Error() string {
if err.Result == EsUnsupported {
return "requested operation not supported"
}
if err.Result == EsVmmError {
return "unexpected state from the VMM"
}
if err.Result == EsDecodeFailed {
return "instruction decoding failed"
}
if err.Result == EsException {
return "instruction caused exception"
}
if err.Result == EsRetry {
return "retry instruction emulation"
}
return "unknown error"
}
// SnpReportReqABI is Linux's sev-guest ioctl abi for sending a GET_REPORT request. See
// include/uapi/linux/sev-guest.h
type SnpReportReqABI struct {
// ReportData to be included in the report
ReportData [64]uint8
// Vmpl is the SEV-SNP VMPL level to be included in the report.
// The kernel must have access to the corresponding VMPCK.
Vmpl uint32
reserved [28]byte
}
// SnpReportRespABI is Linux's sev-guest ioctl abi for receiving a GET_REPORT response.
// The size is expected to be snpReportRespSize.
type SnpReportRespABI struct {
Status uint32
ReportSize uint32
reserved [0x20 - 8]byte
// Data is the response data, see SEV-SNP spec for the format
Data [SnpReportRespReportSize]uint8
}
// ABI returns the same object since it doesn't need a separate representation across the interface.
func (r *SnpReportReqABI) ABI() BinaryConversion { return r }
// Pointer returns a pointer to the object itself.
func (r *SnpReportReqABI) Pointer() unsafe.Pointer {
return unsafe.Pointer(r)
}
// Finish is a no-op.
func (r *SnpReportReqABI) Finish(_ BinaryConvertible) error { return nil }
// ABI returns the same object since it doesn't need a separate representation across the interface.
func (r *SnpReportRespABI) ABI() BinaryConversion { return r }
// Pointer returns a pointer to the object itself.
func (r *SnpReportRespABI) Pointer() unsafe.Pointer {
return unsafe.Pointer(r)
}
// Finish checks the status of the message and translates it to a Golang error.
func (r *SnpReportRespABI) Finish(_ BinaryConvertible) error {
if r.Status != 0 {
switch r.Status {
case 0x16: // Value from MSG_REPORT_RSP specification in SNP API.
return errors.New("get_report had invalid parameters")
default:
return fmt.Errorf("unknown status: 0x%x", r.Status)
}
}
return nil
}
// SnpDerivedKeyReqABI is the ABI representation of a request to the SEV guest device to derive a
// key from specified information.
type SnpDerivedKeyReqABI struct {
// RootKeySelect is all reserved bits except bit 0 for UseVMRK (1) or UseVCEK (0).
RootKeySelect uint32
reserved uint32
GuestFieldSelect uint64
// Vmpl to mix into the key. Must be greater than or equal to current Vmpl.
Vmpl uint32
// GuestSVN to mix into the key. Must be less than or equal to GuestSVN at launch.
GuestSVN uint32
// TCBVersion to mix into the key. Must be less than or equal to the CommittedTcb.
TCBVersion uint64
}
// Pointer returns a pointer to the object.
func (r *SnpDerivedKeyReqABI) Pointer() unsafe.Pointer { return unsafe.Pointer(r) }
// Finish is a no-op.
func (r *SnpDerivedKeyReqABI) Finish(BinaryConvertible) error { return nil }
// ABI returns the ABI representation of this object.
func (r *SnpDerivedKeyReqABI) ABI() BinaryConversion { return r }
// SnpDerivedKeyRespABI represents the response to an SnpDerivedKeyReq.
type SnpDerivedKeyRespABI struct {
Status uint32
reserved [0x20 - 4]byte
Data [32]byte
}
// ABI returns the object itself.
func (r *SnpDerivedKeyRespABI) ABI() BinaryConversion { return r }
// Pointer returns a pointer to the object itself.
func (r *SnpDerivedKeyRespABI) Pointer() unsafe.Pointer { return unsafe.Pointer(r) }
// Finish is a no-op.
func (r *SnpDerivedKeyRespABI) Finish(BinaryConvertible) error {
switch r.Status {
case 0:
return nil
case 0x16:
return errors.New("msg_key_req error: invalid parameters")
default:
return fmt.Errorf("msg_key_req unknown status code: 0x%x", r.Status)
}
}
// SnpExtendedReportReqABI is Linux's sev-guest ioctl abi for sending a GET_EXTENDED_REPORT request.
type SnpExtendedReportReqABI struct {
Data SnpReportReqABI
// Where to copy the certificate blob.
CertsAddress unsafe.Pointer
// length of the certificate blob
CertsLength uint32
}
// SnpExtendedReportReq is close to Linux's sev-guest ioctl abi for sending a GET_EXTENDED_REPORT request,
// but uses safer types for the Ioctl interface.
type SnpExtendedReportReq struct {
Data SnpReportReqABI
// Certs receives the certificate blob after the extended report request.
Certs []byte
// CertsLength is the length of the certificate blob.
CertsLength uint32
}
// Pointer returns a pointer so the object itself.
func (r *SnpExtendedReportReqABI) Pointer() unsafe.Pointer {
return unsafe.Pointer(r)
}
// Finish writes back the changed CertsLength value.
func (r *SnpExtendedReportReqABI) Finish(b BinaryConvertible) error {
s, ok := b.(*SnpExtendedReportReq)
if !ok {
return fmt.Errorf("Finish argument is %v. Expects a *SnpExtendedReportReq", reflect.TypeOf(b))
}
s.CertsLength = r.CertsLength
return nil
}
// ABI returns an object that can cross the ABI boundary and copy back changes to the original
// object.
func (r *SnpExtendedReportReq) ABI() BinaryConversion {
var certsAddress unsafe.Pointer
if len(r.Certs) != 0 {
certsAddress = unsafe.Pointer(&r.Certs[0])
}
return &SnpExtendedReportReqABI{
Data: r.Data,
CertsAddress: certsAddress,
CertsLength: r.CertsLength,
}
}
// SnpUserGuestRequestABI is Linux's sev-guest ioctl abi for issuing a guest message.
type SnpUserGuestRequestABI struct {
GuestMsgVersion uint32
// Request and response structure address.
ReqData unsafe.Pointer
RespData unsafe.Pointer
// firmware error | EsException | random_line_split |
linux_abi.go |
msgReportReqHeaderSize = 0x20
SnpReportRespReportSize = snpResportRespSize - msgReportReqHeaderSize
)
const (
// EsOk denotes success.
EsOk EsResult = iota
// EsUnsupported denotes that the requested operation is not supported.
EsUnsupported
// EsVmmError denotes that the virtual machine monitor was in an unexpected state.
EsVmmError
// EsDecodeFailed denotes that instruction decoding failed.
EsDecodeFailed
// EsException denotes that the GHCB communication caused an exception.
EsException
// EsRetry is the code for a retry instruction emulation
EsRetry
)
// SevEsErr is an error that interprets SEV-ES guest-host communication results.
type SevEsErr struct {
Result EsResult
}
func (err *SevEsErr) Error() string {
if err.Result == EsUnsupported {
return "requested operation not supported"
}
if err.Result == EsVmmError {
return "unexpected state from the VMM"
}
if err.Result == EsDecodeFailed {
return "instruction decoding failed"
}
if err.Result == EsException {
return "instruction caused exception"
}
if err.Result == EsRetry {
return "retry instruction emulation"
}
return "unknown error"
}
// SnpReportReqABI is Linux's sev-guest ioctl abi for sending a GET_REPORT request. See
// include/uapi/linux/sev-guest.h
type SnpReportReqABI struct {
// ReportData to be included in the report
ReportData [64]uint8
// Vmpl is the SEV-SNP VMPL level to be included in the report.
// The kernel must have access to the corresponding VMPCK.
Vmpl uint32
reserved [28]byte
}
// SnpReportRespABI is Linux's sev-guest ioctl abi for receiving a GET_REPORT response.
// The size is expected to be snpReportRespSize.
type SnpReportRespABI struct {
Status uint32
ReportSize uint32
reserved [0x20 - 8]byte
// Data is the response data, see SEV-SNP spec for the format
Data [SnpReportRespReportSize]uint8
}
// ABI returns the same object since it doesn't need a separate representation across the interface.
func (r *SnpReportReqABI) ABI() BinaryConversion { return r }
// Pointer returns a pointer to the object itself.
func (r *SnpReportReqABI) Pointer() unsafe.Pointer {
return unsafe.Pointer(r)
}
// Finish is a no-op.
func (r *SnpReportReqABI) Finish(_ BinaryConvertible) error { return nil }
// ABI returns the same object since it doesn't need a separate representation across the interface.
func (r *SnpReportRespABI) ABI() BinaryConversion { return r }
// Pointer returns a pointer to the object itself.
func (r *SnpReportRespABI) Pointer() unsafe.Pointer {
return unsafe.Pointer(r)
}
// Finish checks the status of the message and translates it to a Golang error.
func (r *SnpReportRespABI) Finish(_ BinaryConvertible) error {
if r.Status != 0 {
switch r.Status {
case 0x16: // Value from MSG_REPORT_RSP specification in SNP API.
return errors.New("get_report had invalid parameters")
default:
return fmt.Errorf("unknown status: 0x%x", r.Status)
}
}
return nil
}
// SnpDerivedKeyReqABI is the ABI representation of a request to the SEV guest device to derive a
// key from specified information.
type SnpDerivedKeyReqABI struct {
// RootKeySelect is all reserved bits except bit 0 for UseVMRK (1) or UseVCEK (0).
RootKeySelect uint32
reserved uint32
GuestFieldSelect uint64
// Vmpl to mix into the key. Must be greater than or equal to current Vmpl.
Vmpl uint32
// GuestSVN to mix into the key. Must be less than or equal to GuestSVN at launch.
GuestSVN uint32
// TCBVersion to mix into the key. Must be less than or equal to the CommittedTcb.
TCBVersion uint64
}
// Pointer returns a pointer to the object.
func (r *SnpDerivedKeyReqABI) Pointer() unsafe.Pointer { return unsafe.Pointer(r) }
// Finish is a no-op.
func (r *SnpDerivedKeyReqABI) Finish(BinaryConvertible) error { return nil }
// ABI returns the ABI representation of this object.
func (r *SnpDerivedKeyReqABI) ABI() BinaryConversion { return r }
// SnpDerivedKeyRespABI represents the response to an SnpDerivedKeyReq.
type SnpDerivedKeyRespABI struct {
Status uint32
reserved [0x20 - 4]byte
Data [32]byte
}
// ABI returns the object itself.
func (r *SnpDerivedKeyRespABI) ABI() BinaryConversion { return r }
// Pointer returns a pointer to the object itself.
func (r *SnpDerivedKeyRespABI) Pointer() unsafe.Pointer { return unsafe.Pointer(r) }
// Finish is a no-op.
func (r *SnpDerivedKeyRespABI) Finish(BinaryConvertible) error {
switch r.Status {
case 0:
return nil
case 0x16:
return errors.New("msg_key_req error: invalid parameters")
default:
return fmt.Errorf("msg_key_req unknown status code: 0x%x", r.Status)
}
}
// SnpExtendedReportReqABI is Linux's sev-guest ioctl abi for sending a GET_EXTENDED_REPORT request.
type SnpExtendedReportReqABI struct {
Data SnpReportReqABI
// Where to copy the certificate blob.
CertsAddress unsafe.Pointer
// length of the certificate blob
CertsLength uint32
}
// SnpExtendedReportReq is close to Linux's sev-guest ioctl abi for sending a GET_EXTENDED_REPORT request,
// but uses safer types for the Ioctl interface.
type SnpExtendedReportReq struct {
Data SnpReportReqABI
// Certs receives the certificate blob after the extended report request.
Certs []byte
// CertsLength is the length of the certificate blob.
CertsLength uint32
}
// Pointer returns a pointer to the object itself.
func (r *SnpExtendedReportReqABI) Pointer() unsafe.Pointer {
return unsafe.Pointer(r)
}
// Finish writes back the changed CertsLength value.
func (r *SnpExtendedReportReqABI) Finish(b BinaryConvertible) error {
s, ok := b.(*SnpExtendedReportReq)
if !ok {
return fmt.Errorf("Finish argument is %v. Expects a *SnpExtendedReportReq", reflect.TypeOf(b))
}
s.CertsLength = r.CertsLength
return nil
}
// ABI returns an object that can cross the ABI boundary and copy back changes to the original
// object.
func (r *SnpExtendedReportReq) ABI() BinaryConversion {
var certsAddress unsafe.Pointer
if len(r.Certs) != 0 {
certsAddress = unsafe.Pointer(&r.Certs[0])
}
return &SnpExtendedReportReqABI{
Data: r.Data,
CertsAddress: certsAddress,
CertsLength: r.CertsLength,
}
}
// SnpUserGuestRequestABI is Linux's sev-guest ioctl abi for issuing a guest message.
type SnpUserGuestRequestABI struct {
GuestMsgVersion uint32
// Request and response structure address.
ReqData unsafe.Pointer
RespData unsafe.Pointer
// firmware error code on failure (see psp-sev.h in Linux kernel)
FwErr uint64
}
type snpUserGuestRequestConversion struct {
abi SnpUserGuestRequestABI
reqConv BinaryConversion
respConv BinaryConversion
}
// SnpUserGuestRequest is Linux's sev-guest ioctl interface for issuing a guest message. The
// types here enhance runtime safety when using Ioctl as an interface.
type SnpUserGuestRequest struct {
// Request and response structure address.
ReqData BinaryConvertible
RespData BinaryConvertible
// firmware error code on failure (see psp-sev.h in Linux kernel)
FwErr uint64
}
// ABI returns an object that can cross the ABI boundary and copy back changes to the original
// object.
func (r *SnpUserGuestRequest) ABI() BinaryConversion {
result := &snpUserGuestRequestConversion{
reqConv: r.ReqData.ABI(),
respConv: r.RespData.ABI(),
}
result.abi.GuestMsgVersion = guestMsgVersion
result.abi.ReqData = result.reqConv.Pointer()
result.abi.RespData = result.respConv.Pointer()
return result
}
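// Illustrative sketch (assumptions: SnpReportReqABI/SnpReportRespABI satisfy
// BinaryConvertible, and the ioctl plumbing lives elsewhere): a GET_REPORT
// exchange pairs the two ABIs through SnpUserGuestRequest.
//
//	var req SnpReportReqABI
//	var resp SnpReportRespABI
//	guestReq := SnpUserGuestRequest{ReqData: &req, RespData: &resp}
//	// ... issue IocSnpGetReport with guestReq.ABI(), then check resp.Finish(nil) ...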
// Pointer returns a pointer to the object that crosses the ABI boundary.
func (r *snpUserGuestRequestConversion) Pointer() unsafe.Pointer {
return unsafe.Pointer(&r.abi)
}
// Finish writes back the FwErr and any changes to the request or response objects.
func (r *snpUserGuestRequestConversion) Finish(b BinaryConvertible) error {
s, ok := b.(*SnpUserGuestRequest)
if !ok | {
return fmt.Errorf("Finish argument is %v. Expects a *SnpUserGuestRequestSafe", reflect.TypeOf(b))
} | conditional_block |
|
linux_abi.go | 2
iocNrshift = 0
iocTypeshift = (iocNrshift + iocNrbits)
iocSizeshift = (iocTypeshift + iocTypebits)
iocDirshift = (iocSizeshift + iocSizebits)
iocWrite = 1
iocRead = 2
// Linux /dev/sev-guest ioctl interface
iocTypeSnpGuestReq = 'S'
iocSnpWithoutNr = ((iocWrite | iocRead) << iocDirshift) |
(iocTypeSnpGuestReq << iocTypeshift) |
// unsafe.Sizeof(snpUserGuestRequest)
(32 << iocSizeshift)
// IocSnpGetReport is the ioctl command for getting an attestation report
IocSnpGetReport = iocSnpWithoutNr | (0x0 << iocNrshift)
// IocSnpGetDerivedKey is the ioctl command for getting a key derived from measured components and
// either the VCEK or VMRK.
IocSnpGetDerivedKey = iocSnpWithoutNr | (0x1 << iocNrshift)
// IocSnpGetExtendedReport is the ioctl command for getting an extended attestation report that includes
// certificate information.
IocSnpGetExtendedReport = iocSnpWithoutNr | (0x2 << iocNrshift)
// The message version for MSG_REPORT_REQ in the SNP API. Specified as 1.
guestMsgVersion = 1
// These numbers are from the uapi header sev_guest.h
snpResportRespSize = 4000
msgReportReqHeaderSize = 0x20
SnpReportRespReportSize = snpResportRespSize - msgReportReqHeaderSize
)
const (
// EsOk denotes success.
EsOk EsResult = iota
// EsUnsupported denotes that the requested operation is not supported.
EsUnsupported
// EsVmmError denotes that the virtual machine monitor was in an unexpected state.
EsVmmError
// EsDecodeFailed denotes that instruction decoding failed.
EsDecodeFailed
// EsException denotes that the GHCB communication caused an exception.
EsException
// EsRetry is the code for a retry instruction emulation
EsRetry
)
// SevEsErr is an error that interprets SEV-ES guest-host communication results.
type SevEsErr struct {
Result EsResult
}
func (err *SevEsErr) Error() string {
if err.Result == EsUnsupported {
return "requested operation not supported"
}
if err.Result == EsVmmError {
return "unexpected state from the VMM"
}
if err.Result == EsDecodeFailed {
return "instruction decoding failed"
}
if err.Result == EsException {
return "instruction caused exception"
}
if err.Result == EsRetry {
return "retry instruction emulation"
}
return "unknown error"
}
// SnpReportReqABI is Linux's sev-guest ioctl abi for sending a GET_REPORT request. See
// include/uapi/linux/sev-guest.h
type SnpReportReqABI struct {
// ReportData to be included in the report
ReportData [64]uint8
// Vmpl is the SEV-SNP VMPL level to be included in the report.
// The kernel must have access to the corresponding VMPCK.
Vmpl uint32
reserved [28]byte
}
// SnpReportRespABI is Linux's sev-guest ioctl abi for receiving a GET_REPORT response.
// The size is expected to be snpReportRespSize.
type SnpReportRespABI struct {
Status uint32
ReportSize uint32
reserved [0x20 - 8]byte
// Data is the response data, see SEV-SNP spec for the format
Data [SnpReportRespReportSize]uint8
}
// ABI returns the same object since it doesn't need a separate representation across the interface.
func (r *SnpReportReqABI) ABI() BinaryConversion { return r }
// Pointer returns a pointer to the object itself.
func (r *SnpReportReqABI) Pointer() unsafe.Pointer {
return unsafe.Pointer(r)
}
// Finish is a no-op.
func (r *SnpReportReqABI) Finish(_ BinaryConvertible) error |
// ABI returns the same object since it doesn't need a separate representation across the interface.
func (r *SnpReportRespABI) ABI() BinaryConversion { return r }
// Pointer returns a pointer to the object itself.
func (r *SnpReportRespABI) Pointer() unsafe.Pointer {
return unsafe.Pointer(r)
}
// Finish checks the status of the message and translates it to a Golang error.
func (r *SnpReportRespABI) Finish(_ BinaryConvertible) error {
if r.Status != 0 {
switch r.Status {
case 0x16: // Value from MSG_REPORT_RSP specification in SNP API.
return errors.New("get_report had invalid parameters")
default:
return fmt.Errorf("unknown status: 0x%x", r.Status)
}
}
return nil
}
// SnpDerivedKeyReqABI is the ABI representation of a request to the SEV guest device to derive a
// key from specified information.
type SnpDerivedKeyReqABI struct {
// RootKeySelect is all reserved bits except bit 0 for UseVMRK (1) or UseVCEK (0).
RootKeySelect uint32
reserved uint32
GuestFieldSelect uint64
// Vmpl to mix into the key. Must be greater than or equal to current Vmpl.
Vmpl uint32
// GuestSVN to mix into the key. Must be less than or equal to GuestSVN at launch.
GuestSVN uint32
// TCBVersion to mix into the key. Must be less than or equal to the CommittedTcb.
TCBVersion uint64
}
// Pointer returns a pointer to the object.
func (r *SnpDerivedKeyReqABI) Pointer() unsafe.Pointer { return unsafe.Pointer(r) }
// Finish is a no-op.
func (r *SnpDerivedKeyReqABI) Finish(BinaryConvertible) error { return nil }
// ABI returns the ABI representation of this object.
func (r *SnpDerivedKeyReqABI) ABI() BinaryConversion { return r }
// SnpDerivedKeyRespABI represents the response to an SnpDerivedKeyReq.
type SnpDerivedKeyRespABI struct {
Status uint32
reserved [0x20 - 4]byte
Data [32]byte
}
// ABI returns the object itself.
func (r *SnpDerivedKeyRespABI) ABI() BinaryConversion { return r }
// Pointer returns a pointer to the object itself.
func (r *SnpDerivedKeyRespABI) Pointer() unsafe.Pointer { return unsafe.Pointer(r) }
// Finish is a no-op.
func (r *SnpDerivedKeyRespABI) Finish(BinaryConvertible) error {
switch r.Status {
case 0:
return nil
case 0x16:
return errors.New("msg_key_req error: invalid parameters")
default:
return fmt.Errorf("msg_key_req unknown status code: 0x%x", r.Status)
}
}
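// Illustrative note: when Status above is zero, the 32-byte Data field holds
// the derived key itself; for example (sketch, plumbing omitted):
//
//	var kresp SnpDerivedKeyRespABI
//	// ... issue IocSnpGetDerivedKey ...
//	key := kresp.Data // the derived key bytes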
// SnpExtendedReportReqABI is Linux's sev-guest ioctl abi for sending a GET_EXTENDED_REPORT request.
type SnpExtendedReportReqABI struct {
Data SnpReportReqABI
// Where to copy the certificate blob.
CertsAddress unsafe.Pointer
// length of the certificate blob
CertsLength uint32
}
// SnpExtendedReportReq is close to Linux's sev-guest ioctl abi for sending a GET_EXTENDED_REPORT request,
// but uses safer types for the Ioctl interface.
type SnpExtendedReportReq struct {
Data SnpReportReqABI
// Certs receives the certificate blob after the extended report request.
Certs []byte
// CertsLength is the length of the certificate blob.
CertsLength uint32
}
// Pointer returns a pointer to the object itself.
func (r *SnpExtendedReportReqABI) Pointer() unsafe.Pointer {
return unsafe.Pointer(r)
}
// Finish writes back the changed CertsLength value.
func (r *SnpExtendedReportReqABI) Finish(b BinaryConvertible) error {
s, ok := b.(*SnpExtendedReportReq)
if !ok {
return fmt.Errorf("Finish argument is %v. Expects a *SnpExtendedReportReq", reflect.TypeOf(b))
}
s.CertsLength = r.CertsLength
return nil
}
// ABI returns an object that can cross the ABI boundary and copy back changes to the original
// object.
func (r *SnpExtendedReportReq) ABI() BinaryConversion {
var certsAddress unsafe.Pointer
if len(r.Certs) != 0 {
certsAddress = unsafe.Pointer(&r.Certs[0])
}
return &SnpExtendedReportReqABI{
Data: r.Data,
CertsAddress: certsAddress,
CertsLength: r.CertsLength,
}
}
// SnpUserGuestRequestABI is Linux's sev-guest ioctl abi for issuing a guest message.
type SnpUserGuestRequestABI struct {
GuestMsgVersion uint32
// Request and response structure address.
ReqData unsafe.Pointer
RespData unsafe.Pointer
// | { return nil } | identifier_body |
EachBehavior.ts | validateNotDefinedIf, validateNotEmptyString, validateNotNullIfFieldEquals, validateOneOf, validateValidId } from 'validator/Validations';
import ComponentTransitions from "component/ComponentTransitions";
import AttributeParser from 'validator/AttributeParser';
import EachTemplateAttributes from "behavior/core/each/EachTemplateAttributes";
import AttributeParserImpl from "validator/AttributeParserImpl";
import { NodeTypes } from "Constants";
import { ATTRIBUTE_DELIMITER } from "const/HardValues";
import Messages from "util/Messages";
import AbstractContainerBehavior from "behavior/AbstractContainerBehavior";
const DEFAULT_ATTRIBUTES: EachAttributes = {
mode: "generated",
idkey: DEFAULT_ID_KEY,
expression: null
};
const TEMPLATE_ATTRIBUTE_PARSER: AttributeParser<EachTemplateAttributes> = new AttributeParserImpl<EachTemplateAttributes>();
TEMPLATE_ATTRIBUTE_PARSER.setDefaults({
type: null,
test: null,
component: null,
module: null,
value: null
});
TEMPLATE_ATTRIBUTE_PARSER.setValidations({
type: [
validateDefined,
validateOneOf(EachTemplateType.EMPTY, EachTemplateType.FIRST, EachTemplateType.LAST, EachTemplateType.ALT, EachTemplateType.ITEM)
],
test: [validateNotEmptyString, validateNotNullIfFieldEquals(Attrs.TYPE, EachTemplateType.ALT)],
component: [
validateValidId,
validateDefinedIf((template: HTMLTemplateElement) => template.content.childElementCount === 0, "template body was not supplied"),
validateNotDefinedIf((template: HTMLTemplateElement) => template.content.childElementCount > 0, "template body was supplied")
],
module: [
validateValidId,
validateNotDefinedIf((template: HTMLTemplateElement) => template.content.childElementCount > 0, "template body was supplied")
]
});
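// Illustrative sketch (the "c:each" prefix and attribute delimiter below are
// assumptions, not taken from this file): the <template> children validated
// above typically look like
//
//   <template c:each:type="item">...</template>
//   <template c:each:type="alt" c:each:test="v().kind === 'special'">...</template>
//   <template c:each:type="empty" c:each:component="emptyRow" c:each:module="widgets"></template>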
class EachBehavior extends AbstractContainerBehavior<any[], HTMLElement, EachAttributes> {
private map: SimpleMap<Nestable>;
private empty: Nestable;
private first: Nestable;
private last: Nestable;
private ids: string[];
private localScope: ScopeImpl;
private scopeItem: any;
private itemFactory: ComponentFactory;
private idStrategy: IdStrategy;
private elIsSelect: boolean;
private alternatives: {
test: Evaluator;
factory: ComponentFactory;
}[];
constructor() {
super();
this.setFlag(BehaviorFlags.CHILD_CONSUMPTION_PROHIBITED);
this.setDefaults(DEFAULT_ATTRIBUTES);
this.setValidations({
idkey: [validateDefined, validateNotEmptyString],
expression: [validateNotEmptyString, validateNotNullIfFieldEquals("mode", "expression")],
mode: [validateDefined, validateOneOf('none', 'generated', 'expression')]
});
}
public onInit(): void {
this.elIsSelect = this.getEl().tagName.toLowerCase() === "select";
}
public onMount(): void {
this.initFields();
this.initScope();
this.initIdStrategy();
this.parseChildElements();
this.onTargetChange(null, this.getMediator().get());
if (this.isMutable()) {
this.getMediator().watch(this, this.onTargetChange);
}
this.tellChildren(ComponentTransitions.MOUNT);
}
public onUnmount(): void {
this.tellChildren(ComponentTransitions.UNMOUNT);
}
public onRemount(): void {
this.tellChildren(ComponentTransitions.MOUNT);
}
public requestDigestionSources(sources: DigestableSource[]): void {
for (const key in this.map) {
if (!this.map.hasOwnProperty(key)) {
continue;
}
const component: Nestable = this.map[key];
sources.push(component);
}
if (this.first) {
sources.push(this.first);
}
if (this.last) {
sources.push(this.last);
}
if (this.empty) {
sources.push(this.empty);
}
}
protected onTargetChange(previous: any[], current: any[]): void {
const newIds: string[] = [];
const items: any[] = current || [];
// tslint:disable-next-line
for (let i = 0; i < items.length; i++) {
const item = items[i];
if (!this.idStrategy.check(item)) {
this.idStrategy.enrich(item, i);
}
const id: string = this.idStrategy.extract(item);
newIds.push(id);
}
if (!equals(10, this.ids, newIds)) {
const newMap: SimpleMap<Nestable> = {};
const components: Nestable[] = [];
for (const item of items) {
const id: string = this.idStrategy.extract(item);
const component: Nestable = this.map[id] ? this.map[id] : this.create(item);
newMap[id] = component;
components.push(component);
delete this.map[id];
}
for (const key in this.map) {
if (this.map.hasOwnProperty(key)) {
const component: Nestable = this.map[key];
component.tell(ComponentTransitions.UNMOUNT);
delete this.map[key];
}
}
this.map = newMap;
const el: HTMLElement = this.getEl();
removeChildElements(el);
if (components.length === 0) {
if (this.empty) {
el.appendChild(this.empty.getEl());
}
} else {
const workingEl: HTMLElement | DocumentFragment = this.elIsSelect ? el : this.getDom().createDocumentFragment();
if (this.first) {
workingEl.appendChild(this.first.getEl());
}
for (const component of components) {
workingEl.appendChild(component.getEl());
}
if (this.last) {
workingEl.appendChild(this.last.getEl());
}
if (!this.elIsSelect) {
el.appendChild(workingEl);
}
}
}
this.ids = newIds;
}
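// Worked example of the reconciliation above (illustrative): with previous ids
// ["a", "b"] and new items yielding ["b", "c"], the "b" component is reused,
// "a" is told to UNMOUNT, "c" is created via create(), and the children are
// re-appended in ["b", "c"] order (wrapped by first/last templates if present).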
private initFields(): void {
this.map = {};
this.empty = null;
this.ids = [];
this.itemFactory = null;
this.alternatives = [];
}
private initScope(): void {
this.localScope = new ScopeImpl(false);
const modelFn: () => any = () => this.getModelFn();
const itemFn: () => any = () => this.scopeItem;
this.localScope.setParent(this.getParent().scope() as ScopeImpl);
this.localScope.add(TemplateAliases.M, modelFn);
this.localScope.add(TemplateAliases.V, itemFn);
}
private initIdStrategy(): void {
switch (this.getParams().mode) {
case EachIdStrategies.GENERATED:
this.idStrategy = new GeneratedIdStrategyImpl(this.getParams().idkey);
break;
case EachIdStrategies.NONE:
this.idStrategy = new NoneIdStrategyImpl(this.getParams().idkey);
break;
case EachIdStrategies.EXPRESSION:
const wkExpr: string = this.getParams().expression;
this.idStrategy = new ExpressionIdStrategyImpl(wkExpr, this.getModule().getCydranContext().logFactory().getLogger(`ExpressionIdStrategy: ${ wkExpr }`));
break;
default:
this.idStrategy = new InvalidIdStrategyImpl();
}
this.idStrategy.init();
}
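// Illustrative summary (assumption about typical usage): mode="generated"
// stamps each item with a fresh id under idkey, mode="none" requires items to
// already carry idkey, and mode="expression" derives the id by evaluating the
// configured expression against each item.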
private parseChildElements(): void {
const children: NodeListOf<ChildNode> = this.getEl().childNodes;
const prefix: string = this.getPrefix();
const validated: boolean = this.isValidated();
let primaryCount: number = 0;
let firstCount: number = 0;
let lastCount: number = 0;
let emptyCount: number = 0;
const errors: Messages = new Messages(`Element with attribute ${ this.getBehaviorPrefix() } is invalid`);
// tslint:disable-next-line
for (let i = 0; i < children.length; i++) {
const child: ChildNode = children[i];
if (child.nodeType === NodeTypes.COMMENT) {
continue;
}
if (child.nodeType === NodeTypes.TEXT && (child as Text).textContent.trim().length === 0) {
continue;
}
if (child.nodeType === NodeTypes.TEXT && (child as Text).textContent.trim().length > 0) {
errors.add(`Non-white space text is not allowed when the parent element has a ${ prefix } attribute present on an element as part of a Cydran component template: ${ (child as Text).textContent.trim() }`);
continue;
}
if (child.nodeType !== NodeTypes.ELEMENT || TagNames.TEMPLATE !== child.nodeName.toLowerCase()) {
errors.add(`Elements other than <template> are not allowed when the parent element has a ${ prefix } attribute present on an element as part of a Cydran component template`);
continue;
}
const template: HTMLTemplateElement = child as HTMLTemplateElement;
if (template.content.childElementCount > 1) |
const tagText: string = validated ? elementAsString(template) : null;
const params: EachTemplateAttributes = TEMPLATE_ATTRIBUTE_PARSER.parse(template, prefix, validated, tagText);
switch (params.type) {
case EachTemplateType.EMPTY:
++emptyCount;
this.empty = this.createFactory(template, params, UtilityComponentFactoryImpl).create();
break;
case EachTemplateType.FIRST:
++firstCount;
this | {
errors.add(`template definitions must only have one top-level tag in repeat on expression: ${ this.getExpression() } and markup: ${ template.innerHTML }`);
continue;
} | conditional_block |
EachBehavior.ts | validateNotDefinedIf, validateNotEmptyString, validateNotNullIfFieldEquals, validateOneOf, validateValidId } from 'validator/Validations';
import ComponentTransitions from "component/ComponentTransitions"; | import EachTemplateAttributes from "behavior/core/each/EachTemplateAttributes";
import AttributeParserImpl from "validator/AttributeParserImpl";
import { NodeTypes } from "Constants";
import { ATTRIBUTE_DELIMITER } from "const/HardValues";
import Messages from "util/Messages";
import AbstractContainerBehavior from "behavior/AbstractContainerBehavior";
const DEFAULT_ATTRIBUTES: EachAttributes = {
mode: "generated",
idkey: DEFAULT_ID_KEY,
expression: null
};
const TEMPLATE_ATTRIBUTE_PARSER: AttributeParser<EachTemplateAttributes> = new AttributeParserImpl<EachTemplateAttributes>();
TEMPLATE_ATTRIBUTE_PARSER.setDefaults({
type: null,
test: null,
component: null,
module: null,
value: null
});
TEMPLATE_ATTRIBUTE_PARSER.setValidations({
type: [
validateDefined,
validateOneOf(EachTemplateType.EMPTY, EachTemplateType.FIRST, EachTemplateType.LAST, EachTemplateType.ALT, EachTemplateType.ITEM)
],
test: [validateNotEmptyString, validateNotNullIfFieldEquals(Attrs.TYPE, EachTemplateType.ALT)],
component: [
validateValidId,
validateDefinedIf((template: HTMLTemplateElement) => template.content.childElementCount === 0, "template body was not supplied"),
validateNotDefinedIf((template: HTMLTemplateElement) => template.content.childElementCount > 0, "template body was supplied")
],
module: [
validateValidId,
validateNotDefinedIf((template: HTMLTemplateElement) => template.content.childElementCount > 0, "template body was supplied")
]
});
class EachBehavior extends AbstractContainerBehavior<any[], HTMLElement, EachAttributes> {
private map: SimpleMap<Nestable>;
private empty: Nestable;
private first: Nestable;
private last: Nestable;
private ids: string[];
private localScope: ScopeImpl;
private scopeItem: any;
private itemFactory: ComponentFactory;
private idStrategy: IdStrategy;
private elIsSelect: boolean;
private alternatives: {
test: Evaluator;
factory: ComponentFactory;
}[];
constructor() {
super();
this.setFlag(BehaviorFlags.CHILD_CONSUMPTION_PROHIBITED);
this.setDefaults(DEFAULT_ATTRIBUTES);
this.setValidations({
idkey: [validateDefined, validateNotEmptyString],
expression: [validateNotEmptyString, validateNotNullIfFieldEquals("mode", "expression")],
mode: [validateDefined, validateOneOf('none', 'generated', 'expression')]
});
}
public onInit(): void {
this.elIsSelect = this.getEl().tagName.toLowerCase() === "select";
}
public onMount(): void {
this.initFields();
this.initScope();
this.initIdStrategy();
this.parseChildElements();
this.onTargetChange(null, this.getMediator().get());
if (this.isMutable()) {
this.getMediator().watch(this, this.onTargetChange);
}
this.tellChildren(ComponentTransitions.MOUNT);
}
public onUnmount(): void {
this.tellChildren(ComponentTransitions.UNMOUNT);
}
public onRemount(): void {
this.tellChildren(ComponentTransitions.MOUNT);
}
public requestDigestionSources(sources: DigestableSource[]): void {
for (const key in this.map) {
if (!this.map.hasOwnProperty(key)) {
continue;
}
const component: Nestable = this.map[key];
sources.push(component);
}
if (this.first) {
sources.push(this.first);
}
if (this.last) {
sources.push(this.last);
}
if (this.empty) {
sources.push(this.empty);
}
}
protected onTargetChange(previous: any[], current: any[]): void {
const newIds: string[] = [];
const items: any[] = current || [];
// tslint:disable-next-line
for (let i = 0; i < items.length; i++) {
const item = items[i];
if (!this.idStrategy.check(item)) {
this.idStrategy.enrich(item, i);
}
const id: string = this.idStrategy.extract(item);
newIds.push(id);
}
if (!equals(10, this.ids, newIds)) {
const newMap: SimpleMap<Nestable> = {};
const components: Nestable[] = [];
for (const item of items) {
const id: string = this.idStrategy.extract(item);
const component: Nestable = this.map[id] ? this.map[id] : this.create(item);
newMap[id] = component;
components.push(component);
delete this.map[id];
}
for (const key in this.map) {
if (this.map.hasOwnProperty(key)) {
const component: Nestable = this.map[key];
component.tell(ComponentTransitions.UNMOUNT);
delete this.map[key];
}
}
this.map = newMap;
const el: HTMLElement = this.getEl();
removeChildElements(el);
if (components.length === 0) {
if (this.empty) {
el.appendChild(this.empty.getEl());
}
} else {
const workingEl: HTMLElement | DocumentFragment = this.elIsSelect ? el : this.getDom().createDocumentFragment();
if (this.first) {
workingEl.appendChild(this.first.getEl());
}
for (const component of components) {
workingEl.appendChild(component.getEl());
}
if (this.last) {
workingEl.appendChild(this.last.getEl());
}
if (!this.elIsSelect) {
el.appendChild(workingEl);
}
}
}
this.ids = newIds;
}
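// Illustrative note (assumption): equals(10, ...) above is a depth-bounded
// equality check over the id lists, so the child map and DOM are only rebuilt
// when the extracted ids actually change, e.g. ["a","b"] -> ["a","b"] is a
// no-op while ["a","b"] -> ["b","c"] triggers a rebuild.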
private initFields(): void {
this.map = {};
this.empty = null;
this.ids = [];
this.itemFactory = null;
this.alternatives = [];
}
private initScope(): void {
this.localScope = new ScopeImpl(false);
const modelFn: () => any = () => this.getModelFn();
const itemFn: () => any = () => this.scopeItem;
this.localScope.setParent(this.getParent().scope() as ScopeImpl);
this.localScope.add(TemplateAliases.M, modelFn);
this.localScope.add(TemplateAliases.V, itemFn);
}
private initIdStrategy(): void {
switch (this.getParams().mode) {
case EachIdStrategies.GENERATED:
this.idStrategy = new GeneratedIdStrategyImpl(this.getParams().idkey);
break;
case EachIdStrategies.NONE:
this.idStrategy = new NoneIdStrategyImpl(this.getParams().idkey);
break;
case EachIdStrategies.EXPRESSION:
const wkExpr: string = this.getParams().expression;
this.idStrategy = new ExpressionIdStrategyImpl(wkExpr, this.getModule().getCydranContext().logFactory().getLogger(`ExpressionIdStrategy: ${ wkExpr }`));
break;
default:
this.idStrategy = new InvalidIdStrategyImpl();
}
this.idStrategy.init();
}
private parseChildElements(): void {
const children: NodeListOf<ChildNode> = this.getEl().childNodes;
const prefix: string = this.getPrefix();
const validated: boolean = this.isValidated();
let primaryCount: number = 0;
let firstCount: number = 0;
let lastCount: number = 0;
let emptyCount: number = 0;
const errors: Messages = new Messages(`Element with attribute ${ this.getBehaviorPrefix() } is invalid`);
// tslint:disable-next-line
for (let i = 0; i < children.length; i++) {
const child: ChildNode = children[i];
if (child.nodeType === NodeTypes.COMMENT) {
continue;
}
if (child.nodeType === NodeTypes.TEXT && (child as Text).textContent.trim().length === 0) {
continue;
}
if (child.nodeType === NodeTypes.TEXT && (child as Text).textContent.trim().length > 0) {
errors.add(`Non-white space text is not allowed when the parent element has a ${ prefix } attribute present on an element as part of a Cydran component template: ${ (child as Text).textContent.trim() }`);
continue;
}
if (child.nodeType !== NodeTypes.ELEMENT || TagNames.TEMPLATE !== child.nodeName.toLowerCase()) {
errors.add(`Elements other than <template> are not allowed when the parent element has a ${ prefix } attribute present on an element as part of a Cydran component template`);
continue;
}
const template: HTMLTemplateElement = child as HTMLTemplateElement;
if (template.content.childElementCount > 1) {
errors.add(`template definitions must only have one top-level tag in repeat on expression: ${ this.getExpression() } and markup: ${ template.innerHTML }`);
continue;
}
const tagText: string = validated ? elementAsString(template) : null;
const params: EachTemplateAttributes = TEMPLATE_ATTRIBUTE_PARSER.parse(template, prefix, validated, tagText);
switch (params.type) {
case EachTemplateType.EMPTY:
++emptyCount;
this.empty = this.createFactory(template, params, UtilityComponentFactoryImpl).create();
break;
case EachTemplateType.FIRST:
++firstCount;
this.first = | import AttributeParser from 'validator/AttributeParser'; | random_line_split |
EachBehavior.ts | .getMediator().watch(this, this.onTargetChange);
}
this.tellChildren(ComponentTransitions.MOUNT);
}
public onUnmount(): void {
this.tellChildren(ComponentTransitions.UNMOUNT);
}
public onRemount(): void {
this.tellChildren(ComponentTransitions.MOUNT);
}
public requestDigestionSources(sources: DigestableSource[]): void {
for (const key in this.map) {
if (!this.map.hasOwnProperty(key)) {
continue;
}
const component: Nestable = this.map[key];
sources.push(component);
}
if (this.first) {
sources.push(this.first);
}
if (this.last) {
sources.push(this.last);
}
if (this.empty) {
sources.push(this.empty);
}
}
protected onTargetChange(previous: any[], current: any[]): void {
const newIds: string[] = [];
const items: any[] = current || [];
// tslint:disable-next-line
for (let i = 0; i < items.length; i++) {
const item = items[i];
if (!this.idStrategy.check(item)) {
this.idStrategy.enrich(item, i);
}
const id: string = this.idStrategy.extract(item);
newIds.push(id);
}
if (!equals(10, this.ids, newIds)) {
const newMap: SimpleMap<Nestable> = {};
const components: Nestable[] = [];
for (const item of items) {
const id: string = this.idStrategy.extract(item);
const component: Nestable = this.map[id] ? this.map[id] : this.create(item);
newMap[id] = component;
components.push(component);
delete this.map[id];
}
for (const key in this.map) {
if (this.map.hasOwnProperty(key)) {
const component: Nestable = this.map[key];
component.tell(ComponentTransitions.UNMOUNT);
delete this.map[key];
}
}
this.map = newMap;
const el: HTMLElement = this.getEl();
removeChildElements(el);
if (components.length === 0) {
if (this.empty) {
el.appendChild(this.empty.getEl());
}
} else {
const workingEl: HTMLElement | DocumentFragment = this.elIsSelect ? el : this.getDom().createDocumentFragment();
if (this.first) {
workingEl.appendChild(this.first.getEl());
}
for (const component of components) {
workingEl.appendChild(component.getEl());
}
if (this.last) {
workingEl.appendChild(this.last.getEl());
}
if (!this.elIsSelect) {
el.appendChild(workingEl);
}
}
}
this.ids = newIds;
}
private initFields(): void {
this.map = {};
this.empty = null;
this.ids = [];
this.itemFactory = null;
this.alternatives = [];
}
private initScope(): void {
this.localScope = new ScopeImpl(false);
const modelFn: () => any = () => this.getModelFn();
const itemFn: () => any = () => this.scopeItem;
this.localScope.setParent(this.getParent().scope() as ScopeImpl);
this.localScope.add(TemplateAliases.M, modelFn);
this.localScope.add(TemplateAliases.V, itemFn);
}
private initIdStrategy(): void {
switch (this.getParams().mode) {
case EachIdStrategies.GENERATED:
this.idStrategy = new GeneratedIdStrategyImpl(this.getParams().idkey);
break;
case EachIdStrategies.NONE:
this.idStrategy = new NoneIdStrategyImpl(this.getParams().idkey);
break;
case EachIdStrategies.EXPRESSION:
const wkExpr: string = this.getParams().expression;
this.idStrategy = new ExpressionIdStrategyImpl(wkExpr, this.getModule().getCydranContext().logFactory().getLogger(`ExpressionIdStrategy: ${ wkExpr }`));
break;
default:
this.idStrategy = new InvalidIdStrategyImpl();
}
this.idStrategy.init();
}
private parseChildElements(): void {
const children: NodeListOf<ChildNode> = this.getEl().childNodes;
const prefix: string = this.getPrefix();
const validated: boolean = this.isValidated();
let primaryCount: number = 0;
let firstCount: number = 0;
let lastCount: number = 0;
let emptyCount: number = 0;
const errors: Messages = new Messages(`Element with attribute ${ this.getBehaviorPrefix() } is invalid`);
// tslint:disable-next-line
for (let i = 0; i < children.length; i++) {
const child: ChildNode = children[i];
if (child.nodeType === NodeTypes.COMMENT) {
continue;
}
if (child.nodeType === NodeTypes.TEXT && (child as Text).textContent.trim().length === 0) {
continue;
}
if (child.nodeType === NodeTypes.TEXT && (child as Text).textContent.trim().length > 0) {
errors.add(`Non-white space text is not allowed when the parent element has a ${ prefix } attribute present on an element as part of a Cydran component template: ${ (child as Text).textContent.trim() }`);
continue;
}
if (child.nodeType !== NodeTypes.ELEMENT || TagNames.TEMPLATE !== child.nodeName.toLowerCase()) {
errors.add(`Elements other than <template> are not allowed when the parent element has a ${ prefix } attribute present on an element as part of a Cydran component template`);
continue;
}
const template: HTMLTemplateElement = child as HTMLTemplateElement;
if (template.content.childElementCount > 1) {
errors.add(`template definitions must only have one top-level tag in repeat on expression: ${ this.getExpression() } and markup: ${ template.innerHTML }`);
continue;
}
const tagText: string = validated ? elementAsString(template) : null;
const params: EachTemplateAttributes = TEMPLATE_ATTRIBUTE_PARSER.parse(template, prefix, validated, tagText);
switch (params.type) {
case EachTemplateType.EMPTY:
++emptyCount;
this.empty = this.createFactory(template, params, UtilityComponentFactoryImpl).create();
break;
case EachTemplateType.FIRST:
++firstCount;
this.first = this.createFactory(template, params, UtilityComponentFactoryImpl).create();
break;
case EachTemplateType.LAST:
++lastCount;
this.last = this.createFactory(template, params, UtilityComponentFactoryImpl).create();
break;
case EachTemplateType.ALT:
this.alternatives.push({
factory: this.createFactory(template, params, ItemComponentFactoryImpl),
test: new Evaluator(params.test, this.localScope, this.getModule().getCydranContext().logFactory().getLogger(`Evaluator: ${ params.test }`))
});
break;
case EachTemplateType.ITEM:
++primaryCount;
this.itemFactory = this.createFactory(template, params, ItemComponentFactoryImpl);
break;
}
}
errors.addIf(primaryCount !== 1, () => `must have only one child <template ${this.getPrefix()}${ATTRIBUTE_DELIMITER}type="${ EachTemplateType.ITEM }"> node/element.`);
errors.addIf(firstCount > 1, () => `must have only zero or one child <template ${this.getPrefix()}${ATTRIBUTE_DELIMITER}type="${ EachTemplateType.FIRST }"> node/element.`);
errors.addIf(lastCount > 1, () => `must have only zero or one child <template ${this.getPrefix()}${ATTRIBUTE_DELIMITER}type="${ EachTemplateType.LAST }"> node/element.`);
errors.addIf(emptyCount > 1, () => `must have only zero or one child <template ${this.getPrefix()}${ATTRIBUTE_DELIMITER}type="${ EachTemplateType.EMPTY }"> node/element.`);
errors.ifMessages((message) => {
throw new TemplateError(message);
});
const el: HTMLElement = this.getEl();
removeChildElements(el);
if (this.empty) {
el.appendChild(this.empty.getEl());
}
}
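// Summary of the validation above (illustrative): exactly one type="item"
// template is required, and at most one each of type="first", type="last" and
// type="empty" may be supplied; anything else raises a TemplateError before
// any children are mounted.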
private create(item: any): Nestable {
let factory: ComponentFactory = this.itemFactory;
if (!factory) {
throw new TemplateError(`template structure for an ${ EachBehavior.name } structure is incorrect or incomplete`);
}
this.scopeItem = item;
try {
if (this.alternatives.length > 0) {
for (const alternative of this.alternatives) {
if (alternative.test.test()) {
factory = alternative.factory;
break;
}
}
}
} finally {
this.scopeItem = null;
}
return factory.create(item);
}
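// Illustrative example: with two "alt" templates whose test attributes are
// "v().done" and "!v().done" (expressions assumed for the example), create()
// picks the first alternative whose Evaluator returns true for the current
// item and otherwise falls back to the primary item factory.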
private createFactory(template: HTMLTemplateElement, params: EachTemplateAttributes, factory: any): ComponentFactory {
const valueFn: () => any = isDefined(params.value) ? () => this.mediate(params.value).get() : this.getValueFn();
return isDefined(params.component)
? new EmbeddedComponentFactoryImpl(this.getModule(), params.component, params.module, this.getParent())
: new factory(this.getModule(), template.innerHTML.trim(), this.getParent().getPrefix(), this.getParent(), this.getParentId(), this.getModelFn(), valueFn);
}
private | tellChildren | identifier_name |
|
EachBehavior.ts | validateNotDefinedIf, validateNotEmptyString, validateNotNullIfFieldEquals, validateOneOf, validateValidId } from 'validator/Validations';
import ComponentTransitions from "component/ComponentTransitions";
import AttributeParser from 'validator/AttributeParser';
import EachTemplateAttributes from "behavior/core/each/EachTemplateAttributes";
import AttributeParserImpl from "validator/AttributeParserImpl";
import { NodeTypes } from "Constants";
import { ATTRIBUTE_DELIMITER } from "const/HardValues";
import Messages from "util/Messages";
import AbstractContainerBehavior from "behavior/AbstractContainerBehavior";
const DEFAULT_ATTRIBUTES: EachAttributes = {
mode: "generated",
idkey: DEFAULT_ID_KEY,
expression: null
};
const TEMPLATE_ATTRIBUTE_PARSER: AttributeParser<EachTemplateAttributes> = new AttributeParserImpl<EachTemplateAttributes>();
TEMPLATE_ATTRIBUTE_PARSER.setDefaults({
type: null,
test: null,
component: null,
module: null,
value: null
});
TEMPLATE_ATTRIBUTE_PARSER.setValidations({
type: [
validateDefined,
validateOneOf(EachTemplateType.EMPTY, EachTemplateType.FIRST, EachTemplateType.LAST, EachTemplateType.ALT, EachTemplateType.ITEM)
],
test: [validateNotEmptyString, validateNotNullIfFieldEquals(Attrs.TYPE, EachTemplateType.ALT)],
component: [
validateValidId,
validateDefinedIf((template: HTMLTemplateElement) => template.content.childElementCount === 0, "template body was not supplied"),
validateNotDefinedIf((template: HTMLTemplateElement) => template.content.childElementCount > 0, "template body was supplied")
],
module: [
validateValidId,
validateNotDefinedIf((template: HTMLTemplateElement) => template.content.childElementCount > 0, "template body was supplied")
]
});
class EachBehavior extends AbstractContainerBehavior<any[], HTMLElement, EachAttributes> {
private map: SimpleMap<Nestable>;
private empty: Nestable;
private first: Nestable;
private last: Nestable;
private ids: string[];
private localScope: ScopeImpl;
private scopeItem: any;
private itemFactory: ComponentFactory;
private idStrategy: IdStrategy;
private elIsSelect: boolean;
private alternatives: {
test: Evaluator;
factory: ComponentFactory;
}[];
constructor() |
public onInit(): void {
this.elIsSelect = this.getEl().tagName.toLowerCase() === "select";
}
public onMount(): void {
this.initFields();
this.initScope();
this.initIdStrategy();
this.parseChildElements();
this.onTargetChange(null, this.getMediator().get());
if (this.isMutable()) {
this.getMediator().watch(this, this.onTargetChange);
}
this.tellChildren(ComponentTransitions.MOUNT);
}
public onUnmount(): void {
this.tellChildren(ComponentTransitions.UNMOUNT);
}
public onRemount(): void {
this.tellChildren(ComponentTransitions.MOUNT);
}
public requestDigestionSources(sources: DigestableSource[]): void {
for (const key in this.map) {
if (!this.map.hasOwnProperty(key)) {
continue;
}
const component: Nestable = this.map[key];
sources.push(component);
}
if (this.first) {
sources.push(this.first);
}
if (this.last) {
sources.push(this.last);
}
if (this.empty) {
sources.push(this.empty);
}
}
protected onTargetChange(previous: any[], current: any[]): void {
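// Reconcile the rendered children with the incoming items: derive an id per item via the id strategy,
// reuse components whose ids already exist, create components for new items, unmount the leftovers,
// and re-append first/items/last (or the empty component) in order.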
const newIds: string[] = [];
const items: any[] = current || [];
// tslint:disable-next-line
for (let i = 0; i < items.length; i++) {
const item = items[i];
if (!this.idStrategy.check(item)) {
this.idStrategy.enrich(item, i);
}
const id: string = this.idStrategy.extract(item);
newIds.push(id);
}
if (!equals(10, this.ids, newIds)) {
const newMap: SimpleMap<Nestable> = {};
const components: Nestable[] = [];
for (const item of items) {
const id: string = this.idStrategy.extract(item);
const component: Nestable = this.map[id] ? this.map[id] : this.create(item);
newMap[id] = component;
components.push(component);
delete this.map[id];
}
for (const key in this.map) {
if (this.map.hasOwnProperty(key)) {
const component: Nestable = this.map[key];
component.tell(ComponentTransitions.UNMOUNT);
delete this.map[key];
}
}
this.map = newMap;
const el: HTMLElement = this.getEl();
removeChildElements(el);
if (components.length === 0) {
if (this.empty) {
el.appendChild(this.empty.getEl());
}
} else {
const workingEl: HTMLElement | DocumentFragment = this.elIsSelect ? el : this.getDom().createDocumentFragment();
if (this.first) {
workingEl.appendChild(this.first.getEl());
}
for (const component of components) {
workingEl.appendChild(component.getEl());
}
if (this.last) {
workingEl.appendChild(this.last.getEl());
}
if (!this.elIsSelect) {
el.appendChild(workingEl);
}
}
}
this.ids = newIds;
}
private initFields(): void {
this.map = {};
this.empty = null;
this.ids = [];
this.itemFactory = null;
this.alternatives = [];
}
private initScope(): void {
this.localScope = new ScopeImpl(false);
const modelFn: () => any = () => this.getModelFn();
const itemFn: () => any = () => this.scopeItem;
this.localScope.setParent(this.getParent().scope() as ScopeImpl);
this.localScope.add(TemplateAliases.M, modelFn);
this.localScope.add(TemplateAliases.V, itemFn);
}
private initIdStrategy(): void {
switch (this.getParams().mode) {
case EachIdStrategies.GENERATED:
this.idStrategy = new GeneratedIdStrategyImpl(this.getParams().idkey);
break;
case EachIdStrategies.NONE:
this.idStrategy = new NoneIdStrategyImpl(this.getParams().idkey);
break;
case EachIdStrategies.EXPRESSION:
const wkExpr: string = this.getParams().expression;
this.idStrategy = new ExpressionIdStrategyImpl(wkExpr, this.getModule().getCydranContext().logFactory().getLogger(`ExpressionIdStrategy: ${ wkExpr }`));
break;
default:
this.idStrategy = new InvalidIdStrategyImpl();
}
this.idStrategy.init();
}
private parseChildElements(): void {
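// Walk the child nodes of the host element: skip comments and whitespace-only text, reject any other
// non-<template> content, then classify each <template> by its parsed type attribute
// (empty/first/last/alt/item), building the matching factory and counting occurrences of each type.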
const children: NodeListOf<ChildNode> = this.getEl().childNodes;
const prefix: string = this.getPrefix();
const validated: boolean = this.isValidated();
let primaryCount: number = 0;
let firstCount: number = 0;
let lastCount: number = 0;
let emptyCount: number = 0;
const errors: Messages = new Messages(`Element with attribute ${ this.getBehaviorPrefix() } is invalid`);
// tslint:disable-next-line
for (let i = 0; i < children.length; i++) {
const child: ChildNode = children[i];
if (child.nodeType === NodeTypes.COMMENT) {
continue;
}
if (child.nodeType === NodeTypes.TEXT && (child as Text).textContent.trim().length === 0) {
continue;
}
if (child.nodeType === NodeTypes.TEXT && (child as Text).textContent.trim().length > 0) {
errors.add(`Non-whitespace text is not allowed when the parent element has a ${ prefix } attribute present on an element as part of a Cydran component template: ${ (child as Text).textContent.trim() }`);
continue;
}
if (child.nodeType !== NodeTypes.ELEMENT || TagNames.TEMPLATE !== child.nodeName.toLowerCase()) {
errors.add(`Elements other than <template> are not allowed when the parent element has a ${ prefix } attribute present on an element as part of a Cydran component template`);
continue;
}
const template: HTMLTemplateElement = child as HTMLTemplateElement;
if (template.content.childElementCount > 1) {
errors.add(`template definitions must only have one top-level tag in repeat on expression: ${ this.getExpression() } and markup: ${ template.innerHTML }`);
continue;
}
const tagText: string = validated ? elementAsString(template) : null;
const params: EachTemplateAttributes = TEMPLATE_ATTRIBUTE_PARSER.parse(template, prefix, validated, tagText);
switch (params.type) {
case EachTemplateType.EMPTY:
++emptyCount;
this.empty = this.createFactory(template, params, UtilityComponentFactoryImpl).create();
break;
case EachTemplateType.FIRST:
++firstCount;
this | {
super();
this.setFlag(BehaviorFlags.CHILD_CONSUMPTION_PROHIBITED);
this.setDefaults(DEFAULT_ATTRIBUTES);
this.setValidations({
idkey: [validateDefined, validateNotEmptyString],
expression: [validateNotEmptyString, validateNotNullIfFieldEquals("mode", "expression")],
mode: [validateDefined, validateOneOf('none', 'generated', 'expression')]
});
} | identifier_body |
submittestevent.go | CaseValidatorFunc function pointer for calling the files
RequestParamsCaseValidatorFunc = common.RequestParamsCaseValidator
)
// SubmitTestEvent is a helper method to handle the submit test event request.
func (e *ExternalInterfaces) SubmitTestEvent(ctx context.Context, req *eventsproto.EventSubRequest) response.RPC {
var resp response.RPC
authResp, err := e.Auth(ctx, req.SessionToken, []string{common.PrivilegeConfigureComponents}, []string{})
if authResp.StatusCode != http.StatusOK {
errMsg := fmt.Sprintf("error while trying to authenticate session: status code: %v, status message: %v", authResp.StatusCode, authResp.StatusMessage)
if err != nil {
errMsg = errMsg + ": " + err.Error()
}
l.LogWithFields(ctx).Error(errMsg)
return authResp
}
// First get the UserName from SessionToken
sessionUserName, err := e.GetSessionUserName(ctx, req.SessionToken)
if err != nil {
// handle the error case with appropriate response body
errMsg := "error while trying to authenticate session: " + err.Error()
l.LogWithFields(ctx).Error(errMsg)
return common.GeneralError(http.StatusUnauthorized, response.NoValidSession, errMsg, nil, nil)
}
testEvent, statusMessage, errMsg, msgArgs := validAndGenSubTestReq(req.PostBody)
if statusMessage != response.Success {
l.LogWithFields(ctx).Error(errMsg)
return common.GeneralError(http.StatusBadRequest, statusMessage, errMsg, msgArgs, nil)
}
// parsing the event
var eventObj interface{}
err = JSONUnmarshal(req.PostBody, &eventObj)
if err != nil {
errMsg := "unable to parse the event request" + err.Error()
l.LogWithFields(ctx).Error(errMsg)
return common.GeneralError(http.StatusInternalServerError, response.InternalError, errMsg, nil, nil)
}
// Validating the request JSON properties for case sensitive
invalidProperties, err := RequestParamsCaseValidatorFunc(req.PostBody, eventObj)
if err != nil {
errMsg := "error while validating request parameters: " + err.Error()
l.LogWithFields(ctx).Error(errMsg)
return common.GeneralError(http.StatusInternalServerError, response.InternalError, errMsg, nil, nil)
} else if invalidProperties != "" {
errorMessage := "error: one or more properties given in the request body are not valid, ensure properties are listed in upper camel case "
l.LogWithFields(ctx).Error(errorMessage)
resp := common.GeneralError(http.StatusBadRequest, response.PropertyUnknown, errorMessage, []interface{}{invalidProperties}, nil)
return resp
}
// Find out all the subscription destinations of the requesting user
subscriptions, err := e.GetEvtSubscriptions(sessionUserName)
if err != nil {
// Internal error
errMsg := "error while trying to find the event destination"
l.LogWithFields(ctx).Error(errMsg)
return common.GeneralError(http.StatusInternalServerError, response.InternalError, errMsg, nil, nil)
}
// we need common.MessageData to find the correct destination to send test event
var message common.MessageData
message.Events = append(message.Events, *testEvent)
messageBytes, _ := json.Marshal(message)
eventUniqueID := uuid.NewV4().String()
for _, sub := range subscriptions {
for _, origin := range sub.EventDestination.OriginResources {
if sub.EventDestination.Destination != "" {
subscription := *sub.EventDestination
subscription.ID = sub.SubscriptionID
if filterEventsToBeForwarded(ctx, subscription, message.Events[0], []model.Link{{Oid: origin.Oid}}) {
l.LogWithFields(ctx).Info("Destination: " + sub.EventDestination.Destination)
go e.postEvent(evmodel.EventPost{Destination: sub.EventDestination.Destination, EventID: eventUniqueID,
Message: messageBytes})
}
}
}
}
resp.StatusCode = http.StatusOK
resp.StatusMessage = response.Success
resp.Body = response.ErrorClass{
Code: resp.StatusMessage,
Message: "Request completed successfully.",
}
return resp
}
func validAndGenSubTestReq(reqBody []byte) (*common.Event, string, string, []interface{}) {
var testEvent common.Event
var req map[string]interface{}
json.Unmarshal(reqBody, &req)
if val, ok := req["MessageId"]; ok {
switch v := val.(type) {
case string:
testEvent.MessageID = v
default:
return nil, response.PropertyValueTypeError,
"error: required parameter MessageId must be of type string",
[]interface{}{fmt.Sprintf("%v", v), "MessageId"}
}
} else {
return nil, response.PropertyMissing, "error: MessageId is a required parameter",
[]interface{}{"MessageId"}
}
if val, ok := req["EventGroupId"]; ok {
switch v := val.(type) {
case int:
testEvent.EventGroupID = v
case float64:
testEvent.EventGroupID = int(v)
default:
return nil, response.PropertyValueTypeError,
"error: optional parameter EventGroupId must be of type integer",
[]interface{}{fmt.Sprintf("%v", v), "EventGroupId"}
}
}
if val, ok := req["EventId"]; ok {
switch v := val.(type) {
case string:
testEvent.EventID = v
default:
return nil, response.PropertyValueTypeError,
"error: optional parameter EventId must be of type string",
[]interface{}{fmt.Sprintf("%v", v), "EventId"}
}
}
if val, ok := req["EventTimestamp"]; ok {
switch v := val.(type) {
case string:
_, err := time.Parse(time.RFC3339, v)
if err != nil {
return nil, response.PropertyValueTypeError,
"error: optional parameter EventTimestamp must be of valid date time format",
[]interface{}{fmt.Sprintf("%v", v), "EventTimestamp"}
}
testEvent.EventTimestamp = v
default:
return nil, response.PropertyValueTypeError,
"error: optional parameter EventTimestamp must be of type string",
[]interface{}{fmt.Sprintf("%v", v), "EventTimestamp"}
}
}
if val, ok := req["EventType"]; ok {
switch v := val.(type) {
case string:
if ok = validEventType(v); ok {
testEvent.EventType = v
} else {
return nil, response.PropertyValueNotInList,
"error: optional parameter EventType must have allowed value",
[]interface{}{fmt.Sprintf("%v", v), "EventType"}
}
default:
return nil, response.PropertyValueTypeError,
"error: optional parameter EventType must be of type string",
[]interface{}{fmt.Sprintf("%v", v), "EventType"}
}
}
if val, ok := req["Message"]; ok {
switch v := val.(type) {
case string:
testEvent.Message = v
default:
return nil, response.PropertyValueTypeError,
"error: optional parameter Message must be of type string",
[]interface{}{fmt.Sprintf("%v", v), "Message"}
}
}
if val, ok := req["MessageArgs"]; ok {
switch v := val.(type) {
case []string:
testEvent.MessageArgs = v
case []interface{}:
msg, _ := json.Marshal(v)
var msgArgs []string
json.Unmarshal(msg, &msgArgs)
testEvent.MessageArgs = msgArgs
default:
return nil, response.PropertyValueTypeError,
"error: optional parameter MessageArgs must be of type array(string)",
[]interface{}{fmt.Sprintf("%v", v), "MessageArgs"}
}
}
if val, ok := req["OriginOfCondition"]; ok {
switch v := val.(type) {
case string:
// As per EventService spec in the SubmitTestEvent schema
// OriginOfCondition is a string. However we need to convert
// this to an object; otherwise the event publisher will drop these events.
testEvent.OriginOfCondition = &common.Link{
Oid: v,
}
default:
return nil, response.PropertyValueTypeError,
"error: optional parameter OriginOfCondition must be of type string",
[]interface{}{fmt.Sprintf("%v", v), "OriginOfCondition"}
}
}
if val, ok := req["Severity"]; ok {
switch v := val.(type) {
case string:
if ok = validSeverity(v); ok {
testEvent.Severity = v
} else {
return nil, response.PropertyValueNotInList,
"error: optional parameter Severity must have allowed value",
[]interface{}{fmt.Sprintf("%v", v), "Severity"}
}
default:
return nil, response.PropertyValueTypeError,
"error: optional parameter Severity must be of type string",
[]interface{}{fmt.Sprintf("%v", v), "Severity"}
}
}
return &testEvent, response.Success, "", nil
}
func validEventType(got string) bool | {
events := getAllowedEventTypes()
for _, event := range events {
if event == got {
return true
}
}
return false
} | identifier_body |
|
submittestevent.go | var (
//JSONUnmarshal function pointer for calling the files
JSONUnmarshal = json.Unmarshal
//RequestParamsCaseValidatorFunc function pointer for calling the files
RequestParamsCaseValidatorFunc = common.RequestParamsCaseValidator
)
// SubmitTestEvent is a helper method to handle the submit test event request.
func (e *ExternalInterfaces) SubmitTestEvent(ctx context.Context, req *eventsproto.EventSubRequest) response.RPC {
var resp response.RPC
authResp, err := e.Auth(ctx, req.SessionToken, []string{common.PrivilegeConfigureComponents}, []string{})
if authResp.StatusCode != http.StatusOK {
errMsg := fmt.Sprintf("error while trying to authenticate session: status code: %v, status message: %v", authResp.StatusCode, authResp.StatusMessage)
if err != nil {
errMsg = errMsg + ": " + err.Error()
}
l.LogWithFields(ctx).Error(errMsg)
return authResp
}
// First get the UserName from SessionToken
sessionUserName, err := e.GetSessionUserName(ctx, req.SessionToken)
if err != nil {
// handle the error case with appropriate response body
errMsg := "error while trying to authenticate session: " + err.Error()
l.LogWithFields(ctx).Error(errMsg)
return common.GeneralError(http.StatusUnauthorized, response.NoValidSession, errMsg, nil, nil)
}
testEvent, statusMessage, errMsg, msgArgs := validAndGenSubTestReq(req.PostBody)
if statusMessage != response.Success {
l.LogWithFields(ctx).Error(errMsg)
return common.GeneralError(http.StatusBadRequest, statusMessage, errMsg, msgArgs, nil)
}
// parsing the event
var eventObj interface{}
err = JSONUnmarshal(req.PostBody, &eventObj)
if err != nil {
errMsg := "unable to parse the event request" + err.Error()
l.LogWithFields(ctx).Error(errMsg)
return common.GeneralError(http.StatusInternalServerError, response.InternalError, errMsg, nil, nil)
}
// Validating the request JSON properties for case sensitive
invalidProperties, err := RequestParamsCaseValidatorFunc(req.PostBody, eventObj)
if err != nil {
errMsg := "error while validating request parameters: " + err.Error()
l.LogWithFields(ctx).Error(errMsg)
return common.GeneralError(http.StatusInternalServerError, response.InternalError, errMsg, nil, nil)
} else if invalidProperties != "" {
errorMessage := "error: one or more properties given in the request body are not valid, ensure properties are listed in upper camel case "
l.LogWithFields(ctx).Error(errorMessage)
resp := common.GeneralError(http.StatusBadRequest, response.PropertyUnknown, errorMessage, []interface{}{invalidProperties}, nil)
return resp
}
// Find out all the subscription destinations of the requesting user
subscriptions, err := e.GetEvtSubscriptions(sessionUserName)
if err != nil {
// Internal error
errMsg := "error while trying to find the event destination"
l.LogWithFields(ctx).Error(errMsg)
return common.GeneralError(http.StatusInternalServerError, response.InternalError, errMsg, nil, nil)
}
// we need common.MessageData to find the correct destination to send test event
var message common.MessageData
message.Events = append(message.Events, *testEvent)
messageBytes, _ := json.Marshal(message)
eventUniqueID := uuid.NewV4().String()
for _, sub := range subscriptions {
for _, origin := range sub.EventDestination.OriginResources {
if sub.EventDestination.Destination != "" {
subscription := *sub.EventDestination
subscription.ID = sub.SubscriptionID
if filterEventsToBeForwarded(ctx, subscription, message.Events[0], []model.Link{{Oid: origin.Oid}}) {
l.LogWithFields(ctx).Info("Destination: " + sub.EventDestination.Destination)
go e.postEvent(evmodel.EventPost{Destination: sub.EventDestination.Destination, EventID: eventUniqueID,
Message: messageBytes})
}
}
}
}
resp.StatusCode = http.StatusOK
resp.StatusMessage = response.Success
resp.Body = response.ErrorClass{
Code: resp.StatusMessage,
Message: "Request completed successfully.",
}
return resp
}
func validAndGenSubTestReq(reqBody []byte) (*common.Event, string, string, []interface{}) {
var testEvent common.Event
var req map[string]interface{}
json.Unmarshal(reqBody, &req)
if val, ok := req["MessageId"]; ok {
switch v := val.(type) {
case string:
testEvent.MessageID = v
default:
return nil, response.PropertyValueTypeError,
"error: required parameter MessageId must be of type string",
[]interface{}{fmt.Sprintf("%v", v), "MessageId"}
}
} else {
return nil, response.PropertyMissing, "error: MessageId is a required parameter",
[]interface{}{"MessageId"}
}
if val, ok := req["EventGroupId"]; ok {
switch v := val.(type) {
case int:
testEvent.EventGroupID = v
case float64:
testEvent.EventGroupID = int(v)
default:
return nil, response.PropertyValueTypeError,
"error: optional parameter EventGroupId must be of type integer",
[]interface{}{fmt.Sprintf("%v", v), "EventGroupId"}
}
}
if val, ok := req["EventId"]; ok {
switch v := val.(type) {
case string:
testEvent.EventID = v
default:
return nil, response.PropertyValueTypeError,
"error: optional parameter EventId must be of type string",
[]interface{}{fmt.Sprintf("%v", v), "EventId"}
}
}
if val, ok := req["EventTimestamp"]; ok {
switch v := val.(type) {
case string:
_, err := time.Parse(time.RFC3339, v)
if err != nil {
return nil, response.PropertyValueTypeError,
"error: optional parameter EventTimestamp must be of valid date time format",
[]interface{}{fmt.Sprintf("%v", v), "EventTimestamp"}
}
testEvent.EventTimestamp = v
default:
return nil, response.PropertyValueTypeError,
"error: optional parameter EventTimestamp must be of type string",
[]interface{}{fmt.Sprintf("%v", v), "EventTimestamp"}
}
}
if val, ok := req["EventType"]; ok {
switch v := val.(type) {
case string:
if ok = validEventType(v); ok {
testEvent.EventType = v
} else {
return nil, response.PropertyValueNotInList,
"error: optional parameter EventType must have allowed value",
[]interface{}{fmt.Sprintf("%v", v), "EventType"}
}
default:
return nil, response.PropertyValueTypeError,
"error: optional parameter EventType must be of type string",
[]interface{}{fmt.Sprintf("%v", v), "EventType"}
}
}
if val, ok := req["Message"]; ok {
switch v := val.(type) {
case string:
testEvent.Message = v
default:
return nil, response.PropertyValueTypeError,
"error: optional parameter Message must be of type string",
[]interface{}{fmt.Sprintf("%v", v), "Message"}
}
}
if val, ok := req["MessageArgs"]; ok {
switch v := val.(type) {
case []string:
testEvent.MessageArgs = v
case []interface{}:
msg, _ := json.Marshal(v)
var msgArgs []string
json.Unmarshal(msg, &msgArgs)
testEvent.MessageArgs = msgArgs
default:
return nil, response.PropertyValueTypeError,
"error: optional parameter MessageArgs must be of type array(string)",
[]interface{}{fmt.Sprintf("%v", v), "MessageArgs"}
}
}
if val, ok := req["OriginOfCondition"]; ok {
switch v := val.(type) {
case string:
// As per EventService spec in the SubmitTestEvent schema
// OriginOfCondition is a string. However we need to convert
// this to an object; otherwise the event publisher will drop these events.
testEvent.OriginOfCondition = &common.Link{
Oid: v,
}
default:
return nil, response.PropertyValueTypeError,
"error: optional parameter OriginOfCondition must be of type string",
[]interface{}{fmt.Sprintf("%v", v), "OriginOfCondition"}
}
}
if val, ok := req["Severity"]; ok {
switch v := val.(type) {
case string:
if ok = validSeverity(v); ok {
testEvent.Severity = v
} else {
return nil, response.PropertyValueNotInList,
"error: optional parameter Severity must have allowed value",
[]interface{}{fmt.Sprintf("%v", v), "Severity"}
}
default:
return nil, response.PropertyValueTypeError,
"error: optional parameter Severity must be of type string",
[]interface{}{fmt.Sprintf("%v", v), "Severity"}
}
}
return &testEvent, response.Success, "", nil
}
| func validEventType(got string) bool {
events := getAllowedEventTypes() | random_line_split |
|
submittestevent.go | have the functionality of
// - Create Event Subscription
// - Delete Event Subscription
// - Get Event Subscription
// - Post Event Subscription to destination
// - Post TestEvent (SubmitTestEvent)
// and corresponding unit test cases
package events
import (
"context"
"encoding/json"
"fmt"
"net/http"
"time"
uuid "github.com/satori/go.uuid"
"github.com/ODIM-Project/ODIM/lib-dmtf/model"
"github.com/ODIM-Project/ODIM/lib-utilities/common"
l "github.com/ODIM-Project/ODIM/lib-utilities/logs"
eventsproto "github.com/ODIM-Project/ODIM/lib-utilities/proto/events"
"github.com/ODIM-Project/ODIM/lib-utilities/response"
"github.com/ODIM-Project/ODIM/svc-events/evmodel"
)
var (
//JSONUnmarshal function pointer for calling the files
JSONUnmarshal = json.Unmarshal
//RequestParamsCaseValidatorFunc function pointer for calling the files
RequestParamsCaseValidatorFunc = common.RequestParamsCaseValidator
)
// SubmitTestEvent is a helper method to handle the submit test event request.
func (e *ExternalInterfaces) SubmitTestEvent(ctx context.Context, req *eventsproto.EventSubRequest) response.RPC {
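// Flow: authenticate the session token, validate and build the test event from the request body,
// re-unmarshal the body to verify property casing, look up the caller's subscriptions, and forward
// the event to every destination whose filters match.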
var resp response.RPC
authResp, err := e.Auth(ctx, req.SessionToken, []string{common.PrivilegeConfigureComponents}, []string{})
if authResp.StatusCode != http.StatusOK {
errMsg := fmt.Sprintf("error while trying to authenticate session: status code: %v, status message: %v", authResp.StatusCode, authResp.StatusMessage)
if err != nil {
errMsg = errMsg + ": " + err.Error()
}
l.LogWithFields(ctx).Error(errMsg)
return authResp
}
// First get the UserName from SessionToken
sessionUserName, err := e.GetSessionUserName(ctx, req.SessionToken)
if err != nil {
// handle the error case with appropriate response body
errMsg := "error while trying to authenticate session: " + err.Error()
l.LogWithFields(ctx).Error(errMsg)
return common.GeneralError(http.StatusUnauthorized, response.NoValidSession, errMsg, nil, nil)
}
testEvent, statusMessage, errMsg, msgArgs := validAndGenSubTestReq(req.PostBody)
if statusMessage != response.Success {
l.LogWithFields(ctx).Error(errMsg)
return common.GeneralError(http.StatusBadRequest, statusMessage, errMsg, msgArgs, nil)
}
// parsing the event
var eventObj interface{}
err = JSONUnmarshal(req.PostBody, &eventObj)
if err != nil {
errMsg := "unable to parse the event request" + err.Error()
l.LogWithFields(ctx).Error(errMsg)
return common.GeneralError(http.StatusInternalServerError, response.InternalError, errMsg, nil, nil)
}
// Validating the request JSON properties for case sensitive
invalidProperties, err := RequestParamsCaseValidatorFunc(req.PostBody, eventObj)
if err != nil {
errMsg := "error while validating request parameters: " + err.Error()
l.LogWithFields(ctx).Error(errMsg)
return common.GeneralError(http.StatusInternalServerError, response.InternalError, errMsg, nil, nil)
} else if invalidProperties != "" {
errorMessage := "error: one or more properties given in the request body are not valid, ensure properties are listed in upper camel case "
l.LogWithFields(ctx).Error(errorMessage)
resp := common.GeneralError(http.StatusBadRequest, response.PropertyUnknown, errorMessage, []interface{}{invalidProperties}, nil)
return resp
}
// Find out all the subscription destinations of the requesting user
subscriptions, err := e.GetEvtSubscriptions(sessionUserName)
if err != nil {
// Internal error
errMsg := "error while trying to find the event destination"
l.LogWithFields(ctx).Error(errMsg)
return common.GeneralError(http.StatusInternalServerError, response.InternalError, errMsg, nil, nil)
}
// we need common.MessageData to find the correct destination to send test event
var message common.MessageData
message.Events = append(message.Events, *testEvent)
messageBytes, _ := json.Marshal(message)
eventUniqueID := uuid.NewV4().String()
for _, sub := range subscriptions {
for _, origin := range sub.EventDestination.OriginResources {
if sub.EventDestination.Destination != "" {
subscription := *sub.EventDestination
subscription.ID = sub.SubscriptionID
if filterEventsToBeForwarded(ctx, subscription, message.Events[0], []model.Link{{Oid: origin.Oid}}) {
l.LogWithFields(ctx).Info("Destination: " + sub.EventDestination.Destination)
go e.postEvent(evmodel.EventPost{Destination: sub.EventDestination.Destination, EventID: eventUniqueID,
Message: messageBytes})
}
}
}
}
resp.StatusCode = http.StatusOK
resp.StatusMessage = response.Success
resp.Body = response.ErrorClass{
Code: resp.StatusMessage,
Message: "Request completed successfully.",
}
return resp
}
func | (reqBody []byte) (*common.Event, string, string, []interface{}) {
var testEvent common.Event
var req map[string]interface{}
json.Unmarshal(reqBody, &req)
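// Each property is type-checked individually; on the first violation the function returns a nil
// event together with the response status, an error message, and the message arguments.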
if val, ok := req["MessageId"]; ok {
switch v := val.(type) {
case string:
testEvent.MessageID = v
default:
return nil, response.PropertyValueTypeError,
"error: required parameter MessageId must be of type string",
[]interface{}{fmt.Sprintf("%v", v), "MessageId"}
}
} else {
return nil, response.PropertyMissing, "error: MessageId is a required parameter",
[]interface{}{"MessageId"}
}
if val, ok := req["EventGroupId"]; ok {
switch v := val.(type) {
case int:
testEvent.EventGroupID = v
case float64:
testEvent.EventGroupID = int(v)
default:
return nil, response.PropertyValueTypeError,
"error: optional parameter EventGroupId must be of type integer",
[]interface{}{fmt.Sprintf("%v", v), "EventGroupId"}
}
}
if val, ok := req["EventId"]; ok {
switch v := val.(type) {
case string:
testEvent.EventID = v
default:
return nil, response.PropertyValueTypeError,
"error: optional parameter EventId must be of type string",
[]interface{}{fmt.Sprintf("%v", v), "EventId"}
}
}
if val, ok := req["EventTimestamp"]; ok {
switch v := val.(type) {
case string:
_, err := time.Parse(time.RFC3339, v)
if err != nil {
return nil, response.PropertyValueTypeError,
"error: optional parameter EventTimestamp must be of valid date time format",
[]interface{}{fmt.Sprintf("%v", v), "EventTimestamp"}
}
testEvent.EventTimestamp = v
default:
return nil, response.PropertyValueTypeError,
"error: optional parameter EventTimestamp must be of type string",
[]interface{}{fmt.Sprintf("%v", v), "EventTimestamp"}
}
}
if val, ok := req["EventType"]; ok {
switch v := val.(type) {
case string:
if ok = validEventType(v); ok {
testEvent.EventType = v
} else {
return nil, response.PropertyValueNotInList,
"error: optional parameter EventType must have allowed value",
[]interface{}{fmt.Sprintf("%v", v), "EventType"}
}
default:
return nil, response.PropertyValueTypeError,
"error: optional parameter EventType must be of type string",
[]interface{}{fmt.Sprintf("%v", v), "EventType"}
}
}
if val, ok := req["Message"]; ok {
switch v := val.(type) {
case string:
testEvent.Message = v
default:
return nil, response.PropertyValueTypeError,
"error: optional parameter Message must be of type string",
[]interface{}{fmt.Sprintf("%v", v), "Message"}
}
}
if val, ok := req["MessageArgs"]; ok {
switch v := val.(type) {
case []string:
testEvent.MessageArgs = v
case []interface{}:
msg, _ := json.Marshal(v)
var msgArgs []string
json.Unmarshal(msg, &msgArgs)
testEvent.MessageArgs = msgArgs
default:
return nil, response.PropertyValueTypeError,
"error: optional parameter MessageArgs must be of type array(string)",
[]interface{}{fmt.Sprintf("%v", v), "MessageArgs"}
}
}
if val, ok := req["OriginOfCondition"]; ok {
switch v := val.(type) {
case string:
// As per EventService spec in the SubmitTestEvent schema
// OriginOfCondition is a string. However we need to convert
// this to an object; otherwise the event publisher will drop these events.
testEvent.OriginOfCondition = &common.Link{
Oid: v,
}
default:
return nil, response.PropertyValueTypeError,
"error: optional parameter OriginOfCondition must be of type string",
[]interface | validAndGenSubTestReq | identifier_name |
submittestevent.go | have the functionality of
// - Create Event Subscription
// - Delete Event Subscription
// - Get Event Subscription
// - Post Event Subscription to destination
// - Post TestEvent (SubmitTestEvent)
// and corresponding unit test cases
package events
import (
"context"
"encoding/json"
"fmt"
"net/http"
"time"
uuid "github.com/satori/go.uuid"
"github.com/ODIM-Project/ODIM/lib-dmtf/model"
"github.com/ODIM-Project/ODIM/lib-utilities/common"
l "github.com/ODIM-Project/ODIM/lib-utilities/logs"
eventsproto "github.com/ODIM-Project/ODIM/lib-utilities/proto/events"
"github.com/ODIM-Project/ODIM/lib-utilities/response"
"github.com/ODIM-Project/ODIM/svc-events/evmodel"
)
var (
//JSONUnmarshal function pointer for calling the files
JSONUnmarshal = json.Unmarshal
//RequestParamsCaseValidatorFunc function pointer for calling the files
RequestParamsCaseValidatorFunc = common.RequestParamsCaseValidator
)
// SubmitTestEvent is a helper method to handle the submit test event request.
func (e *ExternalInterfaces) SubmitTestEvent(ctx context.Context, req *eventsproto.EventSubRequest) response.RPC {
var resp response.RPC
authResp, err := e.Auth(ctx, req.SessionToken, []string{common.PrivilegeConfigureComponents}, []string{})
if authResp.StatusCode != http.StatusOK {
errMsg := fmt.Sprintf("error while trying to authenticate session: status code: %v, status message: %v", authResp.StatusCode, authResp.StatusMessage)
if err != nil {
errMsg = errMsg + ": " + err.Error()
}
l.LogWithFields(ctx).Error(errMsg)
return authResp
}
// First get the UserName from SessionToken
sessionUserName, err := e.GetSessionUserName(ctx, req.SessionToken)
if err != nil {
// handle the error case with appropriate response body
errMsg := "error while trying to authenticate session: " + err.Error()
l.LogWithFields(ctx).Error(errMsg)
return common.GeneralError(http.StatusUnauthorized, response.NoValidSession, errMsg, nil, nil)
}
testEvent, statusMessage, errMsg, msgArgs := validAndGenSubTestReq(req.PostBody)
if statusMessage != response.Success {
l.LogWithFields(ctx).Error(errMsg)
return common.GeneralError(http.StatusBadRequest, statusMessage, errMsg, msgArgs, nil)
}
// parsing the event
var eventObj interface{}
err = JSONUnmarshal(req.PostBody, &eventObj)
if err != nil {
errMsg := "unable to parse the event request" + err.Error()
l.LogWithFields(ctx).Error(errMsg)
return common.GeneralError(http.StatusInternalServerError, response.InternalError, errMsg, nil, nil)
}
// Validating the request JSON properties for case sensitive
invalidProperties, err := RequestParamsCaseValidatorFunc(req.PostBody, eventObj)
if err != nil {
errMsg := "error while validating request parameters: " + err.Error()
l.LogWithFields(ctx).Error(errMsg)
return common.GeneralError(http.StatusInternalServerError, response.InternalError, errMsg, nil, nil)
} else if invalidProperties != "" {
errorMessage := "error: one or more properties given in the request body are not valid, ensure properties are listed in upper camel case "
l.LogWithFields(ctx).Error(errorMessage)
resp := common.GeneralError(http.StatusBadRequest, response.PropertyUnknown, errorMessage, []interface{}{invalidProperties}, nil)
return resp
}
// Find out all the subscription destinations of the requesting user
subscriptions, err := e.GetEvtSubscriptions(sessionUserName)
if err != nil |
// we need common.MessageData to find the correct destination to send test event
var message common.MessageData
message.Events = append(message.Events, *testEvent)
messageBytes, _ := json.Marshal(message)
eventUniqueID := uuid.NewV4().String()
for _, sub := range subscriptions {
for _, origin := range sub.EventDestination.OriginResources {
if sub.EventDestination.Destination != "" {
subscription := *sub.EventDestination
subscription.ID = sub.SubscriptionID
if filterEventsToBeForwarded(ctx, subscription, message.Events[0], []model.Link{{Oid: origin.Oid}}) {
l.LogWithFields(ctx).Info("Destination: " + sub.EventDestination.Destination)
go e.postEvent(evmodel.EventPost{Destination: sub.EventDestination.Destination, EventID: eventUniqueID,
Message: messageBytes})
}
}
}
}
resp.StatusCode = http.StatusOK
resp.StatusMessage = response.Success
resp.Body = response.ErrorClass{
Code: resp.StatusMessage,
Message: "Request completed successfully.",
}
return resp
}
func validAndGenSubTestReq(reqBody []byte) (*common.Event, string, string, []interface{}) {
var testEvent common.Event
var req map[string]interface{}
json.Unmarshal(reqBody, &req)
if val, ok := req["MessageId"]; ok {
switch v := val.(type) {
case string:
testEvent.MessageID = v
default:
return nil, response.PropertyValueTypeError,
"error: required parameter MessageId must be of type string",
[]interface{}{fmt.Sprintf("%v", v), "MessageId"}
}
} else {
return nil, response.PropertyMissing, "error: MessageId is a required parameter",
[]interface{}{"MessageId"}
}
if val, ok := req["EventGroupId"]; ok {
switch v := val.(type) {
case int:
testEvent.EventGroupID = v
case float64:
testEvent.EventGroupID = int(v)
default:
return nil, response.PropertyValueTypeError,
"error: optional parameter EventGroupId must be of type integer",
[]interface{}{fmt.Sprintf("%v", v), "EventGroupId"}
}
}
if val, ok := req["EventId"]; ok {
switch v := val.(type) {
case string:
testEvent.EventID = v
default:
return nil, response.PropertyValueTypeError,
"error: optional parameter EventId must be of type string",
[]interface{}{fmt.Sprintf("%v", v), "EventId"}
}
}
if val, ok := req["EventTimestamp"]; ok {
switch v := val.(type) {
case string:
_, err := time.Parse(time.RFC3339, v)
if err != nil {
return nil, response.PropertyValueTypeError,
"error: optional parameter EventTimestamp must be of valid date time format",
[]interface{}{fmt.Sprintf("%v", v), "EventTimestamp"}
}
testEvent.EventTimestamp = v
default:
return nil, response.PropertyValueTypeError,
"error: optional parameter EventTimestamp must be of type string",
[]interface{}{fmt.Sprintf("%v", v), "EventTimestamp"}
}
}
if val, ok := req["EventType"]; ok {
switch v := val.(type) {
case string:
if ok = validEventType(v); ok {
testEvent.EventType = v
} else {
return nil, response.PropertyValueNotInList,
"error: optional parameter EventType must have allowed value",
[]interface{}{fmt.Sprintf("%v", v), "EventType"}
}
default:
return nil, response.PropertyValueTypeError,
"error: optional parameter EventType must be of type string",
[]interface{}{fmt.Sprintf("%v", v), "EventType"}
}
}
if val, ok := req["Message"]; ok {
switch v := val.(type) {
case string:
testEvent.Message = v
default:
return nil, response.PropertyValueTypeError,
"error: optional parameter Message must be of type string",
[]interface{}{fmt.Sprintf("%v", v), "Message"}
}
}
if val, ok := req["MessageArgs"]; ok {
switch v := val.(type) {
case []string:
testEvent.MessageArgs = v
case []interface{}:
msg, _ := json.Marshal(v)
var msgArgs []string
json.Unmarshal(msg, &msgArgs)
testEvent.MessageArgs = msgArgs
default:
return nil, response.PropertyValueTypeError,
"error: optional parameter MessageArgs must be of type array(string)",
[]interface{}{fmt.Sprintf("%v", v), "MessageArgs"}
}
}
if val, ok := req["OriginOfCondition"]; ok {
switch v := val.(type) {
case string:
// As per EventService spec in the SubmitTestEvent schema
// OriginOfCondition is a string. However we need to convert
// this to an object; otherwise the event publisher will drop these events.
testEvent.OriginOfCondition = &common.Link{
Oid: v,
}
default:
return nil, response.PropertyValueTypeError,
"error: optional parameter OriginOfCondition must be of type string",
[] | {
// Internal error
errMsg := "error while trying to find the event destination"
l.LogWithFields(ctx).Error(errMsg)
return common.GeneralError(http.StatusInternalServerError, response.InternalError, errMsg, nil, nil)
} | conditional_block |
index.js | ////////////////////
var date = new Date();
var d = date.getDate();
var m = date.getMonth();
var y = date.getFullYear();
$scope.changeTo = 'Hungarian';
/* event source that pulls from google.com */
$scope.eventSource = {
url: "http://www.google.com/calendar/feeds/usa__en%40holiday.calendar.google.com/public/basic",
className: 'gcal-event', // an option!
currentTimezone: 'America/Chicago' // an option!
};
/* event source that contains custom events on the scope */
// $scope.events = [
// {title: 'All Day Event',start: new Date(y, m, 1)},
// {title: 'Long Event',start: new Date(y, m, d - 5),end: new Date(y, m, d - 2)},
// {id: 999,title: 'Repeating Event',start: new Date(y, m, d - 3, 16, 0),allDay: false},
// {id: 999,title: 'Repeating Event',start: new Date(y, m, d + 4, 16, 0),allDay: false},
// {title: 'Birthday Party',start: new Date(y, m, d + 1, 19, 0),end: new Date(y, m, d + 1, 22, 30),allDay: false},
// {title: 'Click for Google',start: new Date(y, m, 28),end: new Date(y, m, 29),url: 'http://google.com/'}
// ];
/* event source that calls a function on every view switch */
$scope.eventsF = function (start, end, timezone, callback) {
var s = new Date(start).getTime() / 1000;
var e = new Date(end).getTime() / 1000;
var m = new Date(start).getMonth();
var events = [{title: 'Feed Me ' + m,start: s + (50000),end: s + (100000),allDay: false, className: ['customFeed']}];
callback(events);
};
$scope.calEventsExt = {
color: '#f00',
textColor: 'yellow',
events: [
{type:'party',title: 'Lunch',start: new Date(y, m, d, 12, 0),end: new Date(y, m, d, 14, 0),allDay: false},
{type:'party',title: 'Lunch 2',start: new Date(y, m, d, 12, 0),end: new Date(y, m, d, 14, 0),allDay: false},
{type:'party',title: 'Click for Google',start: new Date(y, m, 28),end: new Date(y, m, 29),url: 'http://google.com/'}
]
};
/* alert on eventClick */
$scope.alertOnEventClick = function( date, jsEvent, view){
$scope.alertMessage = (date.title + ' was clicked ');
};
/* alert on Drop */
$scope.alertOnDrop = function(event, delta, revertFunc, jsEvent, ui, view){
$scope.alertMessage = ('Event Dropped to make dayDelta ' + delta);
};
/* alert on Resize */
$scope.alertOnResize = function(event, delta, revertFunc, jsEvent, ui, view ){
$scope.alertMessage = ('Event Resized to make dayDelta ' + delta);
};
/* adds and removes an event source of choice */
$scope.addRemoveEventSource = function(sources,source) {
var canAdd = 0;
angular.forEach(sources,function(value, key){
if(sources[key] === source){
sources.splice(key,1);
canAdd = 1;
}
});
if(canAdd === 0){
sources.push(source);
}
};
/* add custom event*/
$scope.addEvent = function() {
$scope.events.push({
title: 'Open Sesame',
start: new Date(y, m, 28),
end: new Date(y, m, 29),
className: ['openSesame']
});
};
/* remove event */
$scope.remove = function(index) {
$scope.events.splice(index,1);
};
/* Change View */
$scope.changeView = function(view,calendar) {
uiCalendarConfig.calendars[calendar].fullCalendar( 'changeView', view );
};
/* Render Calendar */
$scope.renderCalender = function(calendar) {
if(uiCalendarConfig.calendars[calendar]){
uiCalendarConfig.calendars[calendar].fullCalendar('render');
}
};
/* Render Tooltip */
$scope.eventRender = function( event, element, view ) {
element.attr({'tooltip': event.title,
'tooltip-append-to-body': true});
$compile(element)($scope);
};
/* config object */
$scope.uiConfig = {
calendar:{
height: 450,
editable: true,
header:{
left: 'title',
center: '',
right: 'today prev,next'
},
eventClick: $scope.alertOnEventClick,
eventDrop: $scope.alertOnDrop,
eventResize: $scope.alertOnResize,
eventRender: $scope.eventRender
}
};
$scope.changeLang = function() {
if($scope.changeTo === 'Hungarian'){
$scope.uiConfig.calendar.dayNames = ["Vasárnap", "Hétfő", "Kedd", "Szerda", "Csütörtök", "Péntek", "Szombat"];
$scope.uiConfig.calendar.dayNamesShort = ["Vas", "Hét", "Kedd", "Sze", "Csüt", "Pén", "Szo"];
$scope.changeTo= 'English';
} else {
$scope.uiConfig.calendar.dayNames = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"];
$scope.uiConfig.calendar.dayNamesShort = ["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"];
$scope.changeTo = 'Hungarian';
}
};
/* event sources array*/
$scope.eventSources = [$scope.events, $scope.eventSource, $scope.events];
$scope.eventSources2 = [$scope.calEventsExt, $scope.eventsF, $scope.events];
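// note: the sample $scope.events array above is commented out, so the $scope.events entries here are undefined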
//////////////////// FINAL CALENDAR ////////////////////
////////// POPOVER ///////////////
$ionicPopover.fromTemplateUrl('templates/calendario.html', {
scope: $scope,
}).then(function(popover) {
$scope.calendario = popover; | }).then(function(popover) {
$scope.recado = popover;
});
$ionicPopover.fromTemplateUrl('templates/user-filho.html', {
scope: $scope,
}).then(function(popover) {
$scope.UserFilho = popover;
});
$scope.scrap = [
{
id: 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.',
image:[{
thumb:'img/filho-01.jpg'
}],
},
{
id: 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.'
},
{
id: 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.'
},
{
id: 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.'
},
{
id: 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.'
},
{
id: 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.'
},
{
id: 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.'
},
{
id: 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.'
},
{
id: 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.'
},
{
id: 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.'
}
];
$scope.userfilho = [
{
id: 'Nome do primero filho nome nome nome',
images:[{
thumb:'img/filho-01.jpg'
}],
},
{
id: 'Nome do segundo filho',
images:[{
thumb:'img/filho-02.jpg'
}],
}
];
////////// END OF POPOVER ///////////////
////////// ACCORDION: SCHOOL NOTES ///////////////
$scope.groups = [
{
name: " | });
$ionicPopover.fromTemplateUrl('templates/recado.html', {
scope: $scope, | random_line_split |
index.js | 0),allDay: false},
// {title: 'Click for Google',start: new Date(y, m, 28),end: new Date(y, m, 29),url: 'http://google.com/'}
// ];
/* event source that calls a function on every view switch */
$scope.eventsF = function (start, end, timezone, callback) {
var s = new Date(start).getTime() / 1000;
var e = new Date(end).getTime() / 1000;
var m = new Date(start).getMonth();
var events = [{title: 'Feed Me ' + m,start: s + (50000),end: s + (100000),allDay: false, className: ['customFeed']}];
callback(events);
};
$scope.calEventsExt = {
color: '#f00',
textColor: 'yellow',
events: [
{type:'party',title: 'Lunch',start: new Date(y, m, d, 12, 0),end: new Date(y, m, d, 14, 0),allDay: false},
{type:'party',title: 'Lunch 2',start: new Date(y, m, d, 12, 0),end: new Date(y, m, d, 14, 0),allDay: false},
{type:'party',title: 'Click for Google',start: new Date(y, m, 28),end: new Date(y, m, 29),url: 'http://google.com/'}
]
};
/* alert on eventClick */
$scope.alertOnEventClick = function( date, jsEvent, view){
$scope.alertMessage = (date.title + ' was clicked ');
};
/* alert on Drop */
$scope.alertOnDrop = function(event, delta, revertFunc, jsEvent, ui, view){
$scope.alertMessage = ('Event Dropped to make dayDelta ' + delta);
};
/* alert on Resize */
$scope.alertOnResize = function(event, delta, revertFunc, jsEvent, ui, view ){
$scope.alertMessage = ('Event Resized to make dayDelta ' + delta);
};
/* adds and removes an event source of choice */
$scope.addRemoveEventSource = function(sources,source) {
var canAdd = 0;
angular.forEach(sources,function(value, key){
if(sources[key] === source){
sources.splice(key,1);
canAdd = 1;
}
});
if(canAdd === 0){
sources.push(source);
}
};
/* add custom event*/
$scope.addEvent = function() {
$scope.events.push({
title: 'Open Sesame',
start: new Date(y, m, 28),
end: new Date(y, m, 29),
className: ['openSesame']
});
};
/* remove event */
$scope.remove = function(index) {
$scope.events.splice(index,1);
};
/* Change View */
$scope.changeView = function(view,calendar) {
uiCalendarConfig.calendars[calendar].fullCalendar( 'changeView', view );
};
/* Render Calendar */
$scope.renderCalender = function(calendar) {
if(uiCalendarConfig.calendars[calendar]){
uiCalendarConfig.calendars[calendar].fullCalendar('render');
}
};
/* Render Tooltip */
$scope.eventRender = function( event, element, view ) {
element.attr({'tooltip': event.title,
'tooltip-append-to-body': true});
$compile(element)($scope);
};
/* config object */
$scope.uiConfig = {
calendar:{
height: 450,
editable: true,
header:{
left: 'title',
center: '',
right: 'today prev,next'
},
eventClick: $scope.alertOnEventClick,
eventDrop: $scope.alertOnDrop,
eventResize: $scope.alertOnResize,
eventRender: $scope.eventRender
}
};
$scope.changeLang = function() {
if($scope.changeTo === 'Hungarian'){
$scope.uiConfig.calendar.dayNames = ["Vasárnap", "Hétfő", "Kedd", "Szerda", "Csütörtök", "Péntek", "Szombat"];
$scope.uiConfig.calendar.dayNamesShort = ["Vas", "Hét", "Kedd", "Sze", "Csüt", "Pén", "Szo"];
$scope.changeTo= 'English';
} else {
$scope.uiConfig.calendar.dayNames = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"];
$scope.uiConfig.calendar.dayNamesShort = ["Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"];
$scope.changeTo = 'Hungarian';
}
};
/* event sources array*/
$scope.eventSources = [$scope.events, $scope.eventSource, $scope.events];
$scope.eventSources2 = [$scope.calEventsExt, $scope.eventsF, $scope.events];
//////////////////// FINAL CALENDAR ////////////////////
////////// POPOVER ///////////////
$ionicPopover.fromTemplateUrl('templates/calendario.html', {
scope: $scope,
}).then(function(popover) {
$scope.calendario = popover;
});
$ionicPopover.fromTemplateUrl('templates/recado.html', {
scope: $scope,
}).then(function(popover) {
$scope.recado = popover;
});
$ionicPopover.fromTemplateUrl('templates/user-filho.html', {
scope: $scope,
}).then(function(popover) {
$scope.UserFilho = popover;
});
$scope.scrap = [
{
id: 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.',
image:[{
thumb:'img/filho-01.jpg'
}],
},
{
id: 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.'
},
{
id: 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.'
},
{
id: 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.'
},
{
id: 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.'
},
{
id: 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.'
},
{
id: 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.'
},
{
id: 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.'
},
{
id: 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.'
},
{
id: 'Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.'
}
];
$scope.userfilho = [
{
id: 'Nome do primero filho nome nome nome',
images:[{
thumb:'img/filho-01.jpg'
}],
},
{
id: 'Nome do segundo filho',
images:[{
thumb:'img/filho-02.jpg'
}],
}
];
////////// END OF POPOVER ///////////////
////////// ACCORDION: SCHOOL NOTES ///////////////
$scope.groups = [
{
name: "Medicamento",
url: "templates/recados-medicamentos.html"
},
{
name: "Disposição",
url: "templates/recados-disposicao.html"
},
{
name: "Recado",
url: "templates/recados-recado.html"
}
];
$scope.group = $scope.groups[0];
/*
* if given group is the selected group, deselect it
* else, select the given group
*/
$scope.toggleGroup = function(group) {
if ($scope.isGroupShown(group)) {
$scope.shownGroup = null;
} else {
$scope.shownGroup = group;
}
};
$scope.isGroupShown = function(group) {
return $scope.shownGroup === group;
};
///// list of disposition radio options ///
$scope.clientSideList = [
{
text: "Agitado",
value: "bb"
},
{
text: "Normal",
value: "ng"
},
{
text: "Quieto",
value: "em"
}
];
////////// END ACCORDION: SCHOOL NOTES ///////////////
/*
* if given group is the selected group, deselect it
* else, select the given group
*/
$scope.toggleGroup = function(group) {
if ($scope.isGroupShown(group)) {
$scope | .shownGroup = null;
} else {
| conditional_block |
|
billing_controller.go | igs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)
const (
UserNamespacePrefix = "ns-"
ResourceQuotaPrefix = "quota-"
)
const BillingAnnotationLastUpdateTime = "account.sealos.io/last-update-time"
// BillingReconciler reconciles a Billing object
type BillingReconciler struct {
client.Client
Scheme *runtime.Scheme
mongoURI string
logr.Logger
AccountSystemNamespace string
}
//+kubebuilder:rbac:groups=core,resources=namespaces,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=core,resources=resourcequotas,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=account.sealos.io,resources=accountbalances,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=account.sealos.io,resources=accountbalances/status,verbs=get;list;watch;create;update;patch;delete
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
// TODO(user): Modify the Reconcile function to compare the state specified by
// the Billing object against the actual cluster state, and then
// perform operations to make the cluster state reflect the state specified by
// the user.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/[email protected]/pkg/reconcile
func (r *BillingReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
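// Flow: open a MongoDB connection, load the reconciled namespace and check its owner annotation,
// then bill every whole-hour window since the last update by creating AccountBalance resources.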
r.Logger.V(1).Info("Reconcile Billing: ", "req.NamespacedName", req.NamespacedName)
dbCtx := context.Background()
dbClient, err := database.NewMongoDB(dbCtx, r.mongoURI)
if err != nil {
r.Logger.Error(err, "connect mongo client failed")
return ctrl.Result{Requeue: true}, err
}
defer func() {
err := dbClient.Disconnect(dbCtx)
if err != nil {
r.Logger.V(5).Info("disconnect mongo client failed", "err", err)
}
}()
ns := &corev1.Namespace{}
if err := r.Get(ctx, req.NamespacedName, ns); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
if ns.DeletionTimestamp != nil {
r.Logger.V(1).Info("namespace is deleting", "namespace", ns)
return ctrl.Result{}, nil
}
own := ns.Annotations[v1.UserAnnotationCreatorKey]
if own == "" {
r.Logger.V(1).Info("billing namespace not found owner annotation", "namespace", ns.Name)
return ctrl.Result{}, nil
} else if own != getUsername(ns.Name) {
r.Logger.V(1).Info("billing namespace owner annotation not equal to namespace name", "namespace", ns.Name)
return ctrl.Result{}, nil
}
nsListStr := make([]string, 0)
// list all namespaces whose "user.sealos.io/creator" annotation equals this owner
// TODO: switch to an indexed annotation List query later
//nsList := &corev1.NamespaceList{}
//if err := r.List(ctx, nsList); err != nil {
// return ctrl.Result{}, err
//}
//if err != nil {
// r.Error(err, "Failed to list namespace")
// return ctrl.Result{}, err
//}
//for _, namespace := range nsList.Items {
// if namespace.Annotations[v1.UserAnnotationCreatorKey] != own {
// continue
// }
// if err = r.syncResourceQuota(ctx, namespace.Name); err != nil {
// r.Error(err, "Failed to syncResourceQuota")
// return ctrl.Result{}, err
// }
// // sync limitrange
// nsListStr = append(nsListStr, namespace.Name)
//
//}
nsListStr = append(nsListStr, ns.Name)
//if err = r.syncResourceQuota(ctx, ns.Name); err != nil {
// r.Error(err, "Failed to syncResourceQuota")
// return ctrl.Result{}, err
//}
//r.Logger.Info("syncResourceQuota success", "nsListStr", nsListStr)
now := time.Now()
currentHourTime := time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, time.UTC)
queryTime := currentHourTime.Add(-1 * time.Hour)
if exist, lastUpdateTime, _ := dbClient.GetBillingLastUpdateTime(own, v12.Consumption); exist {
if lastUpdateTime.Equal(currentHourTime) || lastUpdateTime.After(currentHourTime) {
return ctrl.Result{Requeue: true, RequeueAfter: time.Until(currentHourTime.Add(1*time.Hour + 10*time.Minute))}, nil
}
// data within the last 24 hours is computed from the last update time; otherwise start from the current time
if lastUpdateTime.After(currentHourTime.Add(-24 * time.Hour)) {
queryTime = lastUpdateTime
}
}
// compute the whole-hour points between the last billing and now; the interval is left- | me.Hour).Add(time.Hour); t.Before(currentHourTime) || t.Equal(currentHourTime); t = t.Add(time.Hour) {
if err = r.billingWithHourTime(ctx, t.UTC(), nsListStr, ns.Name, dbClient); err != nil {
r.Logger.Error(err, "billing with hour time failed", "time", t.Format(time.RFC3339))
return ctrl.Result{}, err
}
}
return ctrl.Result{Requeue: true, RequeueAfter: time.Until(currentHourTime.Add(1*time.Hour + 10*time.Minute))}, nil
}
func (r *BillingReconciler) billingWithHourTime(ctx context.Context, queryTime time.Time, nsListStr []string, ownNs string, dbClient database.Interface) error {
r.Logger.Info("queryTime", "queryTime", queryTime.Format(time.RFC3339), "ownNs", ownNs, "nsListStr", nsListStr)
billing, err := dbClient.GetMeteringOwnerTimeResult(queryTime, nsListStr, nil, ownNs)
if err != nil {
return fmt.Errorf("get metering owner time result failed: %w", err)
}
if billing != nil {
if billing.Amount != 0 {
id, err := gonanoid.New(12)
if err != nil {
return fmt.Errorf("create id failed: %w", err)
}
// create accountbalance
accountBalance := v12.AccountBalance{
ObjectMeta: metav1.ObjectMeta{
Name: getUsername(ownNs) + "-" + queryTime.Format("20060102150405"),
Namespace: r.AccountSystemNamespace,
},
Spec: v12.AccountBalanceSpec{
OrderID: id,
Amount: billing.Amount,
Costs: billing.Costs,
Owner: getUsername(ownNs),
Time: metav1.Time{Time: queryTime},
Type: v12.Consumption,
},
}
// ignore already exists error
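// The accountbalance name is deterministic (owner + hour), so re-processing the
// same hour simply hits AlreadyExists and is skipped, keeping billing idempotent.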
if err := r.Create(ctx, &accountBalance); client.IgnoreAlreadyExists(err) != nil {
return fmt.Errorf("create accountbalance failed: %w", err)
}
} else {
r.Logger.Info("billing amount is zero", "billingResult", billing)
}
} else {
r.Logger.Info("billing is nil", "queryTime", queryTime.Format(time.RFC3339))
}
return nil
}
func (r *BillingReconciler) initDB() error {
dbCtx := context.Background()
mongoClient, err := database.NewMongoDB(dbCtx, r.mongoURI)
if err != nil {
r.Logger.Error(err, "connect mongo client failed")
return err
}
defer func() {
err := mongoClient.Disconnect(dbCtx)
if err != nil {
r.Logger.V(5).Info("disconnect mongo client failed", "err", err)
}
}()
return mongoClient.CreateBillingIfNotExist()
}
//func (r *BillingReconciler) syncQueryRoleAndRoleBinding(ctx context.Context, name, namespace string) error {
// role := rbacV1.Role{
// ObjectMeta: metav1.ObjectMeta{
// Name: "userQueryRole-" + name,
// Namespace: namespace,
// },
// }
// if _, err := controllerutil.CreateOrUpdate(ctx, | 开右闭
for t := queryTime.Truncate(ti | conditional_block |
billing_controller.go | ", "err", err)
}
}()
ns := &corev1.Namespace{}
if err := r.Get(ctx, req.NamespacedName, ns); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
if ns.DeletionTimestamp != nil {
r.Logger.V(1).Info("namespace is deleting", "namespace", ns)
return ctrl.Result{}, nil
}
own := ns.Annotations[v1.UserAnnotationCreatorKey]
if own == "" {
r.Logger.V(1).Info("billing namespace not found owner annotation", "namespace", ns.Name)
return ctrl.Result{}, nil
} else if own != getUsername(ns.Name) {
r.Logger.V(1).Info("billing namespace owner annotation not equal to namespace name", "namespace", ns.Name)
return ctrl.Result{}, nil
}
nsListStr := make([]string, 0)
// list all annotation equals to "user.sealos.io/creator"
// TODO: later, use an indexed annotation List
//nsList := &corev1.NamespaceList{}
//if err := r.List(ctx, nsList); err != nil {
// return ctrl.Result{}, err
//}
//if err != nil {
// r.Error(err, "Failed to list namespace")
// return ctrl.Result{}, err
//}
//for _, namespace := range nsList.Items {
// if namespace.Annotations[v1.UserAnnotationCreatorKey] != own {
// continue
// }
// if err = r.syncResourceQuota(ctx, namespace.Name); err != nil {
// r.Error(err, "Failed to syncResourceQuota")
// return ctrl.Result{}, err
// }
// // sync limitrange
// nsListStr = append(nsListStr, namespace.Name)
//
//}
nsListStr = append(nsListStr, ns.Name)
//if err = r.syncResourceQuota(ctx, ns.Name); err != nil {
// r.Error(err, "Failed to syncResourceQuota")
// return ctrl.Result{}, err
//}
//r.Logger.Info("syncResourceQuota success", "nsListStr", nsListStr)
now := time.Now()
currentHourTime := time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, time.UTC)
queryTime := currentHourTime.Add(-1 * time.Hour)
if exist, lastUpdateTime, _ := dbClient.GetBillingLastUpdateTime(own, v12.Consumption); exist {
if lastUpdateTime.Equal(currentHourTime) || lastUpdateTime.After(currentHourTime) {
return ctrl.Result{Requeue: true, RequeueAfter: time.Until(currentHourTime.Add(1*time.Hour + 10*time.Minute))}, nil
}
// For data within the last 24 hours, start from the last update time; otherwise start from the current time.
if lastUpdateTime.After(currentHourTime.Add(-24 * time.Hour)) {
queryTime = lastUpdateTime
}
}
// Bill every whole hour between the last billing time and now, as a half-open interval (start exclusive, end inclusive).
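// Example (assumed times): last billing at 08:30, now 11:05 -> currentHourTime is
// 11:00, the loop starts at 09:00 and bills the 09:00, 10:00 and 11:00 buckets.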
for t := queryTime.Truncate(time.Hour).Add(time.Hour); t.Before(currentHourTime) || t.Equal(currentHourTime); t = t.Add(time.Hour) {
if err = r.billingWithHourTime(ctx, t.UTC(), nsListStr, ns.Name, dbClient); err != nil {
r.Logger.Error(err, "billing with hour time failed", "time", t.Format(time.RFC3339))
return ctrl.Result{}, err
}
}
return ctrl.Result{Requeue: true, RequeueAfter: time.Until(currentHourTime.Add(1*time.Hour + 10*time.Minute))}, nil
}
func (r *BillingReconciler) billingWithHourTime(ctx context.Context, queryTime time.Time, nsListStr []string, ownNs string, dbClient database.Interface) error {
r.Logger.Info("queryTime", "queryTime", queryTime.Format(time.RFC3339), "ownNs", ownNs, "nsListStr", nsListStr)
billing, err := dbClient.GetMeteringOwnerTimeResult(queryTime, nsListStr, nil, ownNs)
if err != nil {
return fmt.Errorf("get metering owner time result failed: %w", err)
}
if billing != nil {
if billing.Amount != 0 {
id, err := gonanoid.New(12)
if err != nil {
return fmt.Errorf("create id failed: %w", err)
}
// create accountbalance
accountBalance := v12.AccountBalance{
ObjectMeta: metav1.ObjectMeta{
Name: getUsername(ownNs) + "-" + queryTime.Format("20060102150405"),
Namespace: r.AccountSystemNamespace,
},
Spec: v12.AccountBalanceSpec{
OrderID: id,
Amount: billing.Amount,
Costs: billing.Costs,
Owner: getUsername(ownNs),
Time: metav1.Time{Time: queryTime},
Type: v12.Consumption,
},
}
// ignore already exists error
if err := r.Create(ctx, &accountBalance); client.IgnoreAlreadyExists(err) != nil {
return fmt.Errorf("create accountbalance failed: %w", err)
}
} else {
r.Logger.Info("billing amount is zero", "billingResult", billing)
}
} else {
r.Logger.Info("billing is nil", "queryTime", queryTime.Format(time.RFC3339))
}
return nil
}
func (r *BillingReconciler) initDB() error {
dbCtx := context.Background()
mongoClient, err := database.NewMongoDB(dbCtx, r.mongoURI)
if err != nil {
r.Logger.Error(err, "connect mongo client failed")
return err
}
defer func() {
err := mongoClient.Disconnect(dbCtx)
if err != nil {
r.Logger.V(5).Info("disconnect mongo client failed", "err", err)
}
}()
return mongoClient.CreateBillingIfNotExist()
}
//func (r *BillingReconciler) syncQueryRoleAndRoleBinding(ctx context.Context, name, namespace string) error {
// role := rbacV1.Role{
// ObjectMeta: metav1.ObjectMeta{
// Name: "userQueryRole-" + name,
// Namespace: namespace,
// },
// }
// if _, err := controllerutil.CreateOrUpdate(ctx, r.Client, &role, func() error {
// role.Rules = []rbacV1.PolicyRule{
// {
// APIGroups: []string{"account.sealos.io"},
// Resources: []string{"billingrecordqueries"},
// Verbs: []string{"create", "get", "watch", "list"},
// },
// }
// return nil
// }); err != nil {
// return fmt.Errorf("create role failed: %v,username: %v,namespace: %v", err, name, namespace)
// }
// roleBinding := rbacV1.RoleBinding{
// ObjectMeta: metav1.ObjectMeta{
// Name: "userAccountRoleBinding-" + name,
// Namespace: namespace,
// },
// }
// if _, err := controllerutil.CreateOrUpdate(ctx, r.Client, &roleBinding, func() error {
// roleBinding.RoleRef = rbacV1.RoleRef{
// APIGroup: "rbac.authorization.k8s.io",
// Kind: "Role",
// Name: role.Name,
// }
// roleBinding.Subjects = helper.GetUsersSubject(name)
// return nil
// }); err != nil {
// return fmt.Errorf("create roleBinding failed: %v,rolename: %v,username: %v,ns: %v", err, role.Name, name, namespace)
// }
// return nil
//}
// SetupWithManager sets up the controller with the Manager.
func (r *BillingReconciler) SetupWithManager(mgr ctrl.Manager, rateOpts controller.Options) error {
if r.mongoURI = os.Getenv(database.MongoURI); r.mongoURI == "" {
return fmt.Errorf("env %s is empty", database.MongoURI)
}
r.Logger = ctrl.Log.WithName("controller").WithName("Billing")
if err := r.initDB(); err != nil {
r.Logger.Error(err, "init db failed")
}
r.AccountSystemNamespace = os.Getenv(ACCOUNTNAMESPACEENV)
if r.AccountSystemNamespace == "" {
r.AccountSystemNamespace = DEFAULTACCOUNTNAMESPACE
}
return ctrl.NewControllerManagedBy(mgr).
For(&corev1.Namespace{}, builder.WithPredicates(predicate.Funcs{
CreateFunc: func(createEvent event.CreateEvent) bool {
_, ok := createEvent.Object.GetAnnotations()[v1.UserAnnotationCreatorKey]
return ok
},
UpdateFunc: func(updateEvent event.UpdateEvent) bool {
return false
},
DeleteFunc: func(deleteEvent event.DeleteEvent) bool {
return false
},
GenericFunc: func(genericEvent event.GenericEvent) bool {
return false
},
})).
WithOptions(rateOpts).
Complete(r)
}
func getUsername(namespace string) string {
return strings.TrimPrefix(namespace, UserNamespacePrefix)
}
| identifier_body |
||
billing_controller.go | reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
// TODO(user): Modify the Reconcile function to compare the state specified by
// the Billing object against the actual cluster state, and then
// perform operations to make the cluster state reflect the state specified by
// the user.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/[email protected]/pkg/reconcile
func (r *BillingReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
r.Logger.V(1).Info("Reconcile Billing: ", "req.NamespacedName", req.NamespacedName)
dbCtx := context.Background()
dbClient, err := database.NewMongoDB(dbCtx, r.mongoURI)
if err != nil {
r.Logger.Error(err, "connect mongo client failed")
return ctrl.Result{Requeue: true}, err
}
defer func() {
err := dbClient.Disconnect(dbCtx)
if err != nil {
r.Logger.V(5).Info("disconnect mongo client failed", "err", err)
}
}()
ns := &corev1.Namespace{}
if err := r.Get(ctx, req.NamespacedName, ns); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
if ns.DeletionTimestamp != nil {
r.Logger.V(1).Info("namespace is deleting", "namespace", ns)
return ctrl.Result{}, nil
}
own := ns.Annotations[v1.UserAnnotationCreatorKey]
if own == "" {
r.Logger.V(1).Info("billing namespace not found owner annotation", "namespace", ns.Name)
return ctrl.Result{}, nil
} else if own != getUsername(ns.Name) {
r.Logger.V(1).Info("billing namespace owner annotation not equal to namespace name", "namespace", ns.Name)
return ctrl.Result{}, nil
}
nsListStr := make([]string, 0)
// list all annotation equals to "user.sealos.io/creator"
// TODO: later, use an indexed annotation List
//nsList := &corev1.NamespaceList{}
//if err := r.List(ctx, nsList); err != nil {
// return ctrl.Result{}, err
//}
//if err != nil {
// r.Error(err, "Failed to list namespace")
// return ctrl.Result{}, err
//}
//for _, namespace := range nsList.Items {
// if namespace.Annotations[v1.UserAnnotationCreatorKey] != own {
// continue
// }
// if err = r.syncResourceQuota(ctx, namespace.Name); err != nil {
// r.Error(err, "Failed to syncResourceQuota")
// return ctrl.Result{}, err
// }
// // sync limitrange
// nsListStr = append(nsListStr, namespace.Name)
//
//}
nsListStr = append(nsListStr, ns.Name)
//if err = r.syncResourceQuota(ctx, ns.Name); err != nil {
// r.Error(err, "Failed to syncResourceQuota")
// return ctrl.Result{}, err
//}
//r.Logger.Info("syncResourceQuota success", "nsListStr", nsListStr)
now := time.Now()
currentHourTime := time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, time.UTC)
queryTime := currentHourTime.Add(-1 * time.Hour)
if exist, lastUpdateTime, _ := dbClient.GetBillingLastUpdateTime(own, v12.Consumption); exist {
if lastUpdateTime.Equal(currentHourTime) || lastUpdateTime.After(currentHourTime) {
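// Nothing new to bill for this hour; requeue for roughly ten minutes past the next
// full hour, presumably to give that hour's metering data time to arrive.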
return ctrl.Result{Requeue: true, RequeueAfter: time.Until(currentHourTime.Add(1*time.Hour + 10*time.Minute))}, nil
}
// For data within the last 24 hours, start from the last update time; otherwise start from the current time.
if lastUpdateTime.After(currentHourTime.Add(-24 * time.Hour)) {
queryTime = lastUpdateTime
}
}
// Bill every whole hour between the last billing time and now, as a half-open interval (start exclusive, end inclusive).
for t := queryTime.Truncate(time.Hour).Add(time.Hour); t.Before(currentHourTime) || t.Equal(currentHourTime); t = t.Add(time.Hour) {
if err = r.billingWithHourTime(ctx, t.UTC(), nsListStr, ns.Name, dbClient); err != nil {
r.Logger.Error(err, "billing with hour time failed", "time", t.Format(time.RFC3339))
return ctrl.Result{}, err
}
}
return ctrl.Result{Requeue: true, RequeueAfter: time.Until(currentHourTime.Add(1*time.Hour + 10*time.Minute))}, nil
}
func (r *BillingReconciler) billingWithHourTime(ctx context.Context, queryTime time.Time, nsListStr []string, ownNs string, dbClient database.Interface) error {
r.Logger.Info("queryTime", "queryTime", queryTime.Format(time.RFC3339), "ownNs", ownNs, "nsListStr", nsListStr)
billing, err := dbClient.GetMeteringOwnerTimeResult(queryTime, nsListStr, nil, ownNs)
if err != nil {
return fmt.Errorf("get metering owner time result failed: %w", err)
}
if billing != nil {
if billing.Amount != 0 {
id, err := gonanoid.New(12)
if err != nil {
return fmt.Errorf("create id failed: %w", err)
}
// create accountbalance
accountBalance := v12.AccountBalance{
ObjectMeta: metav1.ObjectMeta{
Name: getUsername(ownNs) + "-" + queryTime.Format("20060102150405"),
Namespace: r.AccountSystemNamespace,
},
Spec: v12.AccountBalanceSpec{
OrderID: id,
Amount: billing.Amount,
Costs: billing.Costs,
Owner: getUsername(ownNs),
Time: metav1.Time{Time: queryTime},
Type: v12.Consumption,
},
}
// ignore already exists error
if err := r.Create(ctx, &accountBalance); client.IgnoreAlreadyExists(err) != nil {
return fmt.Errorf("create accountbalance failed: %w", err)
}
} else {
r.Logger.Info("billing amount is zero", "billingResult", billing)
}
} else {
r.Logger.Info("billing is nil", "queryTime", queryTime.Format(time.RFC3339))
}
return nil
}
func (r *BillingReconciler) initDB() error {
dbCtx := context.Background()
mongoClient, err := database.NewMongoDB(dbCtx, r.mongoURI)
if err != nil {
r.Logger.Error(err, "connect mongo client failed")
return err
}
defer func() {
err := mongoClient.Disconnect(dbCtx)
if err != nil {
r.Logger.V(5).Info("disconnect mongo client failed", "err", err)
}
}()
return mongoClient.CreateBillingIfNotExist()
}
//func (r *BillingReconciler) syncQueryRoleAndRoleBinding(ctx context.Context, name, namespace string) error {
// role := rbacV1.Role{
// ObjectMeta: metav1.ObjectMeta{
// Name: "userQueryRole-" + name,
// Namespace: namespace,
// },
// }
// if _, err := controllerutil.CreateOrUpdate(ctx, r.Client, &role, func() error {
// role.Rules = []rbacV1.PolicyRule{
// {
// APIGroups: []string{"account.sealos.io"},
// Resources: []string{"billingrecordqueries"},
// Verbs: []string{"create", "get", "watch", "list"},
// },
// }
// return nil
// }); err != nil {
// return fmt.Errorf("create role failed: %v,username: %v,namespace: %v", err, name, namespace)
// }
// roleBinding := rbacV1.RoleBinding{
// ObjectMeta: metav1.ObjectMeta{
// Name: "userAccountRoleBinding-" + name,
// Namespace: namespace,
// },
// }
// if _, err := controllerutil.CreateOrUpdate(ctx, r.Client, &roleBinding, func() error {
// roleBinding.RoleRef = rbacV1.RoleRef{
// APIGroup: "rbac.authorization.k8s.io",
// Kind: "Role",
// Name: role.Name,
// }
// roleBinding.Subjects = helper.GetUsersSubject(name)
// return nil
// }); err != nil {
// return fmt.Errorf("create roleBinding failed: %v,rolename: %v,username: %v,ns: %v", err, role.Name, name, namespace)
// }
// return nil
//}
// SetupWithManager sets up the controller with the Manager.
func (r *BillingReconciler) SetupWithManager(mgr ctrl.Manager, rateOpts controller.Options) error {
if r.mongoURI = os.Getenv(database. | MongoURI); r.mon | identifier_name |
|
billing_controller.go | igs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
)
const (
UserNamespacePrefix = "ns-"
ResourceQuotaPrefix = "quota-"
)
const BillingAnnotationLastUpdateTime = "account.sealos.io/last-update-time"
// BillingReconciler reconciles a Billing object
type BillingReconciler struct {
client.Client
Scheme *runtime.Scheme
mongoURI string
logr.Logger
AccountSystemNamespace string
} | //+kubebuilder:rbac:groups=account.sealos.io,resources=accountbalances,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=account.sealos.io,resources=accountbalances/status,verbs=get;list;watch;create;update;patch;delete
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
// TODO(user): Modify the Reconcile function to compare the state specified by
// the Billing object against the actual cluster state, and then
// perform operations to make the cluster state reflect the state specified by
// the user.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/[email protected]/pkg/reconcile
func (r *BillingReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
r.Logger.V(1).Info("Reconcile Billing: ", "req.NamespacedName", req.NamespacedName)
dbCtx := context.Background()
dbClient, err := database.NewMongoDB(dbCtx, r.mongoURI)
if err != nil {
r.Logger.Error(err, "connect mongo client failed")
return ctrl.Result{Requeue: true}, err
}
defer func() {
err := dbClient.Disconnect(dbCtx)
if err != nil {
r.Logger.V(5).Info("disconnect mongo client failed", "err", err)
}
}()
ns := &corev1.Namespace{}
if err := r.Get(ctx, req.NamespacedName, ns); err != nil {
return ctrl.Result{}, client.IgnoreNotFound(err)
}
if ns.DeletionTimestamp != nil {
r.Logger.V(1).Info("namespace is deleting", "namespace", ns)
return ctrl.Result{}, nil
}
own := ns.Annotations[v1.UserAnnotationCreatorKey]
if own == "" {
r.Logger.V(1).Info("billing namespace not found owner annotation", "namespace", ns.Name)
return ctrl.Result{}, nil
} else if own != getUsername(ns.Name) {
r.Logger.V(1).Info("billing namespace owner annotation not equal to namespace name", "namespace", ns.Name)
return ctrl.Result{}, nil
}
nsListStr := make([]string, 0)
// list all annotation equals to "user.sealos.io/creator"
// TODO: later, use an indexed annotation List
//nsList := &corev1.NamespaceList{}
//if err := r.List(ctx, nsList); err != nil {
// return ctrl.Result{}, err
//}
//if err != nil {
// r.Error(err, "Failed to list namespace")
// return ctrl.Result{}, err
//}
//for _, namespace := range nsList.Items {
// if namespace.Annotations[v1.UserAnnotationCreatorKey] != own {
// continue
// }
// if err = r.syncResourceQuota(ctx, namespace.Name); err != nil {
// r.Error(err, "Failed to syncResourceQuota")
// return ctrl.Result{}, err
// }
// // sync limitrange
// nsListStr = append(nsListStr, namespace.Name)
//
//}
nsListStr = append(nsListStr, ns.Name)
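// Only the namespace that triggered this reconcile is billed here; the commented-out
// block above appears to be an earlier (or planned) variant covering every namespace
// owned by the same user.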
//if err = r.syncResourceQuota(ctx, ns.Name); err != nil {
// r.Error(err, "Failed to syncResourceQuota")
// return ctrl.Result{}, err
//}
//r.Logger.Info("syncResourceQuota success", "nsListStr", nsListStr)
now := time.Now()
currentHourTime := time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, time.UTC)
queryTime := currentHourTime.Add(-1 * time.Hour)
if exist, lastUpdateTime, _ := dbClient.GetBillingLastUpdateTime(own, v12.Consumption); exist {
if lastUpdateTime.Equal(currentHourTime) || lastUpdateTime.After(currentHourTime) {
return ctrl.Result{Requeue: true, RequeueAfter: time.Until(currentHourTime.Add(1*time.Hour + 10*time.Minute))}, nil
}
// For data within the last 24 hours, start from the last update time; otherwise start from the current time.
if lastUpdateTime.After(currentHourTime.Add(-24 * time.Hour)) {
queryTime = lastUpdateTime
}
}
// Bill every whole hour between the last billing time and now, as a half-open interval (start exclusive, end inclusive).
for t := queryTime.Truncate(time.Hour).Add(time.Hour); t.Before(currentHourTime) || t.Equal(currentHourTime); t = t.Add(time.Hour) {
if err = r.billingWithHourTime(ctx, t.UTC(), nsListStr, ns.Name, dbClient); err != nil {
r.Logger.Error(err, "billing with hour time failed", "time", t.Format(time.RFC3339))
return ctrl.Result{}, err
}
}
return ctrl.Result{Requeue: true, RequeueAfter: time.Until(currentHourTime.Add(1*time.Hour + 10*time.Minute))}, nil
}
func (r *BillingReconciler) billingWithHourTime(ctx context.Context, queryTime time.Time, nsListStr []string, ownNs string, dbClient database.Interface) error {
r.Logger.Info("queryTime", "queryTime", queryTime.Format(time.RFC3339), "ownNs", ownNs, "nsListStr", nsListStr)
billing, err := dbClient.GetMeteringOwnerTimeResult(queryTime, nsListStr, nil, ownNs)
if err != nil {
return fmt.Errorf("get metering owner time result failed: %w", err)
}
if billing != nil {
if billing.Amount != 0 {
id, err := gonanoid.New(12)
if err != nil {
return fmt.Errorf("create id failed: %w", err)
}
// create accountbalance
accountBalance := v12.AccountBalance{
ObjectMeta: metav1.ObjectMeta{
Name: getUsername(ownNs) + "-" + queryTime.Format("20060102150405"),
Namespace: r.AccountSystemNamespace,
},
Spec: v12.AccountBalanceSpec{
OrderID: id,
Amount: billing.Amount,
Costs: billing.Costs,
Owner: getUsername(ownNs),
Time: metav1.Time{Time: queryTime},
Type: v12.Consumption,
},
}
// ignore already exists error
if err := r.Create(ctx, &accountBalance); client.IgnoreAlreadyExists(err) != nil {
return fmt.Errorf("create accountbalance failed: %w", err)
}
} else {
r.Logger.Info("billing amount is zero", "billingResult", billing)
}
} else {
r.Logger.Info("billing is nil", "queryTime", queryTime.Format(time.RFC3339))
}
return nil
}
func (r *BillingReconciler) initDB() error {
dbCtx := context.Background()
mongoClient, err := database.NewMongoDB(dbCtx, r.mongoURI)
if err != nil {
r.Logger.Error(err, "connect mongo client failed")
return err
}
defer func() {
err := mongoClient.Disconnect(dbCtx)
if err != nil {
r.Logger.V(5).Info("disconnect mongo client failed", "err", err)
}
}()
return mongoClient.CreateBillingIfNotExist()
}
//func (r *BillingReconciler) syncQueryRoleAndRoleBinding(ctx context.Context, name, namespace string) error {
// role := rbacV1.Role{
// ObjectMeta: metav1.ObjectMeta{
// Name: "userQueryRole-" + name,
// Namespace: namespace,
// },
// }
// if _, err := controllerutil.CreateOrUpdate(ctx, r |
//+kubebuilder:rbac:groups=core,resources=namespaces,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=core,resources=resourcequotas,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles,verbs=get;list;watch;create;update;patch;delete | random_line_split |
fetch_places.rs | with position but lacking places.
#[structopt(long, short)]
auto: bool,
/// Image ids to fetch place data for
photos: Vec<i32>,
}
impl Fetchplaces {
pub fn run(&self) -> Result<(), super::adm::result::Error> {
let db = self.db.connect()?;
if self.auto {
println!("Should find {} photos to fetch places for", self.limit);
use crate::schema::photo_places::dsl as place;
use crate::schema::positions::dsl as pos;
let result = pos::positions
.select((pos::photo_id, (pos::latitude, pos::longitude)))
.filter(pos::photo_id.ne_all(
place::photo_places.select(place::photo_id).distinct(),
))
.order(pos::photo_id.desc())
.limit(self.limit)
.load::<(i32, Coord)>(&db)?;
for (photo_id, coord) in result {
println!("Find places for #{}, {:?}", photo_id, coord);
self.overpass.update_image_places(&db, photo_id)?;
}
} else {
for photo in &self.photos {
self.overpass.update_image_places(&db, *photo)?;
}
}
Ok(())
}
}
#[derive(Clone, Debug, StructOpt)]
#[structopt(rename_all = "kebab-case")]
pub struct OverpassOpt {
/// How to connect to the overpass API.
///
/// See https://wiki.openstreetmap.org/wiki/Overpass_API for
/// available servers and policies.
#[structopt(long, env = "OVERPASS_URL")]
overpass_url: String,
}
impl OverpassOpt {
pub fn update_image_places(
&self,
c: &PgConnection,
image: i32,
) -> Result<(), Error> {
use crate::schema::positions::dsl::*;
let coord = positions
.filter(photo_id.eq(image))
.select((latitude, longitude))
.first::<Coord>(c)
.optional()
.map_err(|e| Error::Db(image, e))?
.ok_or_else(|| Error::NoPosition(image))?;
debug!("Should get places for #{} at {:?}", image, coord);
let data = Client::new()
.post(&self.overpass_url)
.body(format!("[out:json];is_in({},{});out;", coord.x, coord.y))
.send()
.and_then(Response::error_for_status)
.and_then(|mut r| r.json::<Value>())
.map_err(|e| Error::Server(image, e))?;
if let Some(elements) = data
.as_object()
.and_then(|o| o.get("elements"))
.and_then(Value::as_array)
{
for obj in elements {
if let (Some(t_osm_id), Some((name, level))) =
(osm_id(obj), name_and_level(obj))
{
debug!("{}: {} (level {})", t_osm_id, name, level);
let place = get_or_create_place(c, t_osm_id, name, level)
.map_err(|e| Error::Db(image, e))?;
if place.osm_id.is_none() {
debug!("Matched {:?} by name, update osm info", place);
use crate::schema::places::dsl::*;
diesel::update(places)
.filter(id.eq(place.id))
.set((
osm_id.eq(Some(t_osm_id)),
osm_level.eq(level),
))
.execute(c)
.map_err(|e| Error::Db(image, e))?;
}
use crate::models::PhotoPlace;
use crate::schema::photo_places::dsl::*;
let q = photo_places
.filter(photo_id.eq(image))
.filter(place_id.eq(place.id));
if q.first::<PhotoPlace>(c).is_ok() {
debug!(
"Photo #{} already has {} ({})",
image, place.id, place.place_name
);
} else {
diesel::insert_into(photo_places)
.values((
photo_id.eq(image),
place_id.eq(place.id),
))
.execute(c)
.map_err(|e| Error::Db(image, e))?;
}
} else {
info!("Unused area: {}", obj);
}
}
}
Ok(())
}
}
fn osm_id(obj: &Value) -> Option<i64> {
obj.get("id").and_then(Value::as_i64)
}
fn name_and_level(obj: &Value) -> Option<(&str, i16)> {
if let Some(tags) = obj.get("tags") {
let name = tags
.get("name:sv")
//.or_else(|| tags.get("name:en"))
.or_else(|| tags.get("name"))
.and_then(Value::as_str);
let level = tags
.get("admin_level")
.and_then(Value::as_str)
.and_then(|l| l.parse().ok())
.or_else(|| match tag_str(tags, "leisure") {
Some("garden") => Some(18),
Some("nature_reserve") => Some(12),
Some("park") => Some(14),
Some("pitch") => Some(15),
Some("playground") => Some(16),
_ => None,
})
.or_else(|| match tag_str(tags, "tourism") {
Some("attraction") => Some(16),
Some("theme_park") | Some("zoo") => Some(14),
_ => None,
})
.or_else(|| match tag_str(tags, "boundary") {
Some("national_park") => Some(14),
Some("historic") => Some(7), // Seems to be mainly "Landskap"
_ => None,
})
.or_else(|| match tag_str(tags, "landuse") {
Some("allotments") => Some(14),
Some("commercial") => Some(12),
Some("grass") => Some(13),
Some("industrial") => Some(11),
Some("residential") => Some(11),
Some("retail") => Some(13),
_ => None,
})
.or_else(|| match tag_str(tags, "highway") { | Some("station") => Some(18),
_ => None,
})
.or_else(|| match tag_str(tags, "amenity") {
Some("bus_station") => Some(16),
Some("exhibition_center") => Some(20),
Some("kindergarten") => Some(15),
Some("place_of_worship") => Some(15),
Some("school") => Some(14),
Some("university") => Some(12),
_ => None,
})
.or_else(|| match tag_str(tags, "aeroway") {
Some("aerodrome") => Some(14),
_ => None,
})
.or_else(|| match tag_str(tags, "water") {
Some("lake") => Some(15),
_ => None,
})
.or_else(|| match tag_str(tags, "waterway") {
Some("riverbank") => Some(16),
_ => None,
})
.or_else(|| match tag_str(tags, "man_made") {
Some("bridge") => Some(17),
_ => None,
})
.or_else(|| match tag_str(tags, "place") {
Some("city_block") => Some(17),
Some("island") => Some(13),
Some("islet") => Some(17),
Some("penisula") => Some(13),
Some("region") => Some(8),
Some("square") => Some(18),
Some("suburb") => Some(11),
_ => None,
})
.or_else(|| match tag_str(tags, "natural") {
Some("bay") => Some(14),
Some("wood") => Some(14),
Some("scrub") => Some(18),
_ => None,
})
.or_else(|| match tag_str(tags, "building") {
Some("exhibition_center") => Some(19),
Some("sports_hall") => Some(19),
Some(_) => Some(20),
_ => None,
})
.or_else(|| match tag_str(tags, "political_division") {
Some("canton") => Some(9),
_ => None,
});
if let (Some(name), Some(level)) = (name, level) {
debug!("{} is level {}", name, level);
Some((name, level))
} else {
None
}
} else {
warn!("Tag-less object {:?}", obj);
None
}
}
fn tag_str<'a>(tags: &'a Value, name: &str) -> Option<&'a str> {
tags.get(name).and_then(Value::as_str)
}
fn get_or_create_place(
c | Some("pedestrian") => Some(15), // torg
Some("rest_area") => Some(16),
_ => None,
})
.or_else(|| match tag_str(tags, "public_transport") { | random_line_split |
fetch_places.rs | with position but lacking places.
#[structopt(long, short)]
auto: bool,
/// Image ids to fetch place data for
photos: Vec<i32>,
}
impl Fetchplaces {
pub fn run(&self) -> Result<(), super::adm::result::Error> {
let db = self.db.connect()?;
if self.auto {
println!("Should find {} photos to fetch places for", self.limit);
use crate::schema::photo_places::dsl as place;
use crate::schema::positions::dsl as pos;
let result = pos::positions
.select((pos::photo_id, (pos::latitude, pos::longitude)))
.filter(pos::photo_id.ne_all(
place::photo_places.select(place::photo_id).distinct(),
))
.order(pos::photo_id.desc())
.limit(self.limit)
.load::<(i32, Coord)>(&db)?;
for (photo_id, coord) in result {
println!("Find places for #{}, {:?}", photo_id, coord);
self.overpass.update_image_places(&db, photo_id)?;
}
} else {
for photo in &self.photos {
self.overpass.update_image_places(&db, *photo)?;
}
}
Ok(())
}
}
#[derive(Clone, Debug, StructOpt)]
#[structopt(rename_all = "kebab-case")]
pub struct OverpassOpt {
/// How to connect to the overpass API.
///
/// See https://wiki.openstreetmap.org/wiki/Overpass_API for
/// available servers and policies.
#[structopt(long, env = "OVERPASS_URL")]
overpass_url: String,
}
impl OverpassOpt {
pub fn update_image_places(
&self,
c: &PgConnection,
image: i32,
) -> Result<(), Error> {
use crate::schema::positions::dsl::*;
let coord = positions
.filter(photo_id.eq(image))
.select((latitude, longitude))
.first::<Coord>(c)
.optional()
.map_err(|e| Error::Db(image, e))?
.ok_or_else(|| Error::NoPosition(image))?;
debug!("Should get places for #{} at {:?}", image, coord);
let data = Client::new()
.post(&self.overpass_url)
.body(format!("[out:json];is_in({},{});out;", coord.x, coord.y))
.send()
.and_then(Response::error_for_status)
.and_then(|mut r| r.json::<Value>())
.map_err(|e| Error::Server(image, e))?;
if let Some(elements) = data
.as_object()
.and_then(|o| o.get("elements"))
.and_then(Value::as_array)
{
for obj in elements {
if let (Some(t_osm_id), Some((name, level))) =
(osm_id(obj), name_and_level(obj))
{
debug!("{}: {} (level {})", t_osm_id, name, level);
let place = get_or_create_place(c, t_osm_id, name, level)
.map_err(|e| Error::Db(image, e))?;
if place.osm_id.is_none() {
debug!("Matched {:?} by name, update osm info", place);
use crate::schema::places::dsl::*;
diesel::update(places)
.filter(id.eq(place.id))
.set((
osm_id.eq(Some(t_osm_id)),
osm_level.eq(level),
))
.execute(c)
.map_err(|e| Error::Db(image, e))?;
}
use crate::models::PhotoPlace;
use crate::schema::photo_places::dsl::*;
let q = photo_places
.filter(photo_id.eq(image))
.filter(place_id.eq(place.id));
if q.first::<PhotoPlace>(c).is_ok() {
debug!(
"Photo #{} already has {} ({})",
image, place.id, place.place_name
);
} else {
diesel::insert_into(photo_places)
.values((
photo_id.eq(image),
place_id.eq(place.id),
))
.execute(c)
.map_err(|e| Error::Db(image, e))?;
}
} else {
info!("Unused area: {}", obj);
}
}
}
Ok(())
}
}
fn osm_id(obj: &Value) -> Option<i64> {
obj.get("id").and_then(Value::as_i64)
}
fn name_and_level(obj: &Value) -> Option<(&str, i16)> | Some("attraction") => Some(16),
Some("theme_park") | Some("zoo") => Some(14),
_ => None,
})
.or_else(|| match tag_str(tags, "boundary") {
Some("national_park") => Some(14),
Some("historic") => Some(7), // Seems to be mainly "Landskap"
_ => None,
})
.or_else(|| match tag_str(tags, "landuse") {
Some("allotments") => Some(14),
Some("commercial") => Some(12),
Some("grass") => Some(13),
Some("industrial") => Some(11),
Some("residential") => Some(11),
Some("retail") => Some(13),
_ => None,
})
.or_else(|| match tag_str(tags, "highway") {
Some("pedestrian") => Some(15), // torg
Some("rest_area") => Some(16),
_ => None,
})
.or_else(|| match tag_str(tags, "public_transport") {
Some("station") => Some(18),
_ => None,
})
.or_else(|| match tag_str(tags, "amenity") {
Some("bus_station") => Some(16),
Some("exhibition_center") => Some(20),
Some("kindergarten") => Some(15),
Some("place_of_worship") => Some(15),
Some("school") => Some(14),
Some("university") => Some(12),
_ => None,
})
.or_else(|| match tag_str(tags, "aeroway") {
Some("aerodrome") => Some(14),
_ => None,
})
.or_else(|| match tag_str(tags, "water") {
Some("lake") => Some(15),
_ => None,
})
.or_else(|| match tag_str(tags, "waterway") {
Some("riverbank") => Some(16),
_ => None,
})
.or_else(|| match tag_str(tags, "man_made") {
Some("bridge") => Some(17),
_ => None,
})
.or_else(|| match tag_str(tags, "place") {
Some("city_block") => Some(17),
Some("island") => Some(13),
Some("islet") => Some(17),
Some("penisula") => Some(13),
Some("region") => Some(8),
Some("square") => Some(18),
Some("suburb") => Some(11),
_ => None,
})
.or_else(|| match tag_str(tags, "natural") {
Some("bay") => Some(14),
Some("wood") => Some(14),
Some("scrub") => Some(18),
_ => None,
})
.or_else(|| match tag_str(tags, "building") {
Some("exhibition_center") => Some(19),
Some("sports_hall") => Some(19),
Some(_) => Some(20),
_ => None,
})
.or_else(|| match tag_str(tags, "political_division") {
Some("canton") => Some(9),
_ => None,
});
if let (Some(name), Some(level)) = (name, level) {
debug!("{} is level {}", name, level);
Some((name, level))
} else {
None
}
} else {
warn!("Tag-less object {:?}", obj);
None
}
}
fn tag_str<'a>(tags: &'a Value, name: &str) -> Option<&'a str> {
tags.get(name).and_then(Value::as_str)
}
fn get_or_create_place(
| {
if let Some(tags) = obj.get("tags") {
let name = tags
.get("name:sv")
//.or_else(|| tags.get("name:en"))
.or_else(|| tags.get("name"))
.and_then(Value::as_str);
let level = tags
.get("admin_level")
.and_then(Value::as_str)
.and_then(|l| l.parse().ok())
.or_else(|| match tag_str(tags, "leisure") {
Some("garden") => Some(18),
Some("nature_reserve") => Some(12),
Some("park") => Some(14),
Some("pitch") => Some(15),
Some("playground") => Some(16),
_ => None,
})
.or_else(|| match tag_str(tags, "tourism") { | identifier_body |
fetch_places.rs | with position but lacking places.
#[structopt(long, short)]
auto: bool,
/// Image ids to fetch place data for
photos: Vec<i32>,
}
impl Fetchplaces {
pub fn run(&self) -> Result<(), super::adm::result::Error> {
let db = self.db.connect()?;
if self.auto {
println!("Should find {} photos to fetch places for", self.limit);
use crate::schema::photo_places::dsl as place;
use crate::schema::positions::dsl as pos;
let result = pos::positions
.select((pos::photo_id, (pos::latitude, pos::longitude)))
.filter(pos::photo_id.ne_all(
place::photo_places.select(place::photo_id).distinct(),
))
.order(pos::photo_id.desc())
.limit(self.limit)
.load::<(i32, Coord)>(&db)?;
for (photo_id, coord) in result {
println!("Find places for #{}, {:?}", photo_id, coord);
self.overpass.update_image_places(&db, photo_id)?;
}
} else {
for photo in &self.photos {
self.overpass.update_image_places(&db, *photo)?;
}
}
Ok(())
}
}
#[derive(Clone, Debug, StructOpt)]
#[structopt(rename_all = "kebab-case")]
pub struct OverpassOpt {
/// How to connect to the overpass API.
///
/// See https://wiki.openstreetmap.org/wiki/Overpass_API for
/// available servers and policies.
#[structopt(long, env = "OVERPASS_URL")]
overpass_url: String,
}
impl OverpassOpt {
pub fn update_image_places(
&self,
c: &PgConnection,
image: i32,
) -> Result<(), Error> {
use crate::schema::positions::dsl::*;
let coord = positions
.filter(photo_id.eq(image))
.select((latitude, longitude))
.first::<Coord>(c)
.optional()
.map_err(|e| Error::Db(image, e))?
.ok_or_else(|| Error::NoPosition(image))?;
debug!("Should get places for #{} at {:?}", image, coord);
let data = Client::new()
.post(&self.overpass_url)
.body(format!("[out:json];is_in({},{});out;", coord.x, coord.y))
.send()
.and_then(Response::error_for_status)
.and_then(|mut r| r.json::<Value>())
.map_err(|e| Error::Server(image, e))?;
if let Some(elements) = data
.as_object()
.and_then(|o| o.get("elements"))
.and_then(Value::as_array)
{
for obj in elements {
if let (Some(t_osm_id), Some((name, level))) =
(osm_id(obj), name_and_level(obj))
{
debug!("{}: {} (level {})", t_osm_id, name, level);
let place = get_or_create_place(c, t_osm_id, name, level)
.map_err(|e| Error::Db(image, e))?;
if place.osm_id.is_none() {
debug!("Matched {:?} by name, update osm info", place);
use crate::schema::places::dsl::*;
diesel::update(places)
.filter(id.eq(place.id))
.set((
osm_id.eq(Some(t_osm_id)),
osm_level.eq(level),
))
.execute(c)
.map_err(|e| Error::Db(image, e))?;
}
use crate::models::PhotoPlace;
use crate::schema::photo_places::dsl::*;
let q = photo_places
.filter(photo_id.eq(image))
.filter(place_id.eq(place.id));
if q.first::<PhotoPlace>(c).is_ok() {
debug!(
"Photo #{} already has {} ({})",
image, place.id, place.place_name
);
} else {
diesel::insert_into(photo_places)
.values((
photo_id.eq(image),
place_id.eq(place.id),
))
.execute(c)
.map_err(|e| Error::Db(image, e))?;
}
} else {
info!("Unused area: {}", obj);
}
}
}
Ok(())
}
}
fn osm_id(obj: &Value) -> Option<i64> {
obj.get("id").and_then(Value::as_i64)
}
fn name_and_level(obj: &Value) -> Option<(&str, i16)> {
if let Some(tags) = obj.get("tags") {
let name = tags
.get("name:sv")
//.or_else(|| tags.get("name:en"))
.or_else(|| tags.get("name"))
.and_then(Value::as_str);
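// Areas without an explicit admin_level get a synthetic level from other tags
// below; larger numbers mean more specific places (a building is 20, a suburb 11).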
let level = tags
.get("admin_level")
.and_then(Value::as_str)
.and_then(|l| l.parse().ok())
.or_else(|| match tag_str(tags, "leisure") {
Some("garden") => Some(18),
Some("nature_reserve") => Some(12),
Some("park") => Some(14),
Some("pitch") => Some(15),
Some("playground") => Some(16),
_ => None,
})
.or_else(|| match tag_str(tags, "tourism") {
Some("attraction") => Some(16),
Some("theme_park") | Some("zoo") => Some(14),
_ => None,
})
.or_else(|| match tag_str(tags, "boundary") {
Some("national_park") => Some(14),
Some("historic") => Some(7), // Seems to be mainly "Landskap"
_ => None,
})
.or_else(|| match tag_str(tags, "landuse") {
Some("allotments") => Some(14),
Some("commercial") => Some(12),
Some("grass") => Some(13),
Some("industrial") => Some(11),
Some("residential") => Some(11),
Some("retail") => Some(13),
_ => None,
})
.or_else(|| match tag_str(tags, "highway") {
Some("pedestrian") => Some(15), // torg
Some("rest_area") => Some(16),
_ => None,
})
.or_else(|| match tag_str(tags, "public_transport") {
Some("station") => Some(18),
_ => None,
})
.or_else(|| match tag_str(tags, "amenity") {
Some("bus_station") => Some(16),
Some("exhibition_center") => Some(20),
Some("kindergarten") => Some(15),
Some("place_of_worship") => Some(15),
Some("school") => Some(14),
Some("university") => Some(12),
_ => None,
})
.or_else(|| match tag_str(tags, "aeroway") {
Some("aerodrome") => Some(14),
_ => None,
})
.or_else(|| match tag_str(tags, "water") {
Some("lake") => Some(15),
_ => None,
})
.or_else(|| match tag_str(tags, "waterway") {
Some("riverbank") => Some(16),
_ => None,
})
.or_else(|| match tag_str(tags, "man_made") {
Some("bridge") => Some(17),
_ => None,
})
.or_else(|| match tag_str(tags, "place") {
Some("city_block") => Some(17),
Some("island") => Some(13),
Some("islet") => Some(17),
Some("penisula") => Some(13),
Some("region") => Some(8),
Some("square") => Some(18),
Some("suburb") => Some(11),
_ => None,
})
.or_else(|| match tag_str(tags, "natural") {
Some("bay") => Some(14),
Some("wood") => Some(14),
Some("scrub") => Some(18),
_ => None,
})
.or_else(|| match tag_str(tags, "building") {
Some("exhibition_center") => Some(19),
Some("sports_hall") => Some(19),
Some(_) => Some(20),
_ => None,
})
.or_else(|| match tag_str(tags, "political_division") {
Some("canton") => Some(9),
_ => None,
});
if let (Some(name), Some(level)) = (name, level) {
debug!("{} is level {}", name, level);
Some((name, level))
} else {
None
}
} else {
warn!("Tag-less object {:?}", obj);
None
}
}
fn | <'a>(tags: &'a Value, name: &str) -> Option<&'a str> {
tags.get(name).and_then(Value::as_str)
}
fn get_or_create_place(
| tag_str | identifier_name |
inbound.rs | all the inbound SWIM messages.
use super::AckSender;
use crate::{member::Health,
server::{outbound,
Server},
swim::{Ack,
Ping,
PingReq,
Swim,
SwimKind}};
use habitat_common::liveliness_checker;
use habitat_core::util::ToI64;
use lazy_static::lazy_static;
use log::{debug,
error,
trace};
use prometheus::{register_int_counter_vec,
register_int_gauge_vec,
IntCounterVec,
IntGaugeVec};
use std::{net::{SocketAddr,
UdpSocket},
thread,
time::Duration};
lazy_static! {
static ref SWIM_MESSAGES_RECEIVED: IntCounterVec =
register_int_counter_vec!("hab_butterfly_swim_messages_received_total",
"Total number of SWIM messages received",
&["type", "mode"]).unwrap();
static ref SWIM_BYTES_RECEIVED: IntGaugeVec =
register_int_gauge_vec!("hab_butterfly_swim_received_bytes",
"SWIM message size received in bytes",
&["type", "mode"]).unwrap();
}
pub fn spawn_thread(name: String,
server: Server,
socket: UdpSocket,
tx_outbound: AckSender)
-> std::io::Result<()> {
thread::Builder::new().name(name)
.spawn(move || -> ! { run_loop(&server, &socket, &tx_outbound) })
.map(|_| ())
}
/// Run the thread. Listens for messages up to 1k in size, and then processes them accordingly.
/// Takes the Server and a channel to send received Acks to the outbound thread.
pub fn run_loop(server: &Server, socket: &UdpSocket, tx_outbound: &AckSender) -> ! {
let mut recv_buffer: Vec<u8> = vec![0; 1024];
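// A single SWIM datagram is assumed to fit in 1 KiB; recv_from reads at most this
// many bytes per packet.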
loop {
liveliness_checker::mark_thread_alive().and_divergent();
if server.paused() {
thread::sleep(Duration::from_millis(100));
continue;
}
match socket.recv_from(&mut recv_buffer[..]) {
Ok((length, addr)) => {
let swim_payload = match server.unwrap_wire(&recv_buffer[0..length]) {
Ok(swim_payload) => swim_payload,
Err(e) => {
// NOTE: In the future, we might want to block people who send us
// garbage all the time.
error!("Error unwrapping protocol message, {}", e);
let label_values = &["unwrap_wire", "failure"];
SWIM_BYTES_RECEIVED.with_label_values(label_values)
.set(length.to_i64());
SWIM_MESSAGES_RECEIVED.with_label_values(label_values).inc();
continue;
}
};
let bytes_received = swim_payload.len();
let msg = match Swim::decode(&swim_payload) {
Ok(msg) => msg,
Err(e) => {
// NOTE: In the future, we might want to block people who send us
// garbage all the time.
error!("Error decoding protocol message, {}", e);
let label_values = &["undecodable", "failure"];
SWIM_BYTES_RECEIVED.with_label_values(label_values)
.set(bytes_received.to_i64());
SWIM_MESSAGES_RECEIVED.with_label_values(label_values).inc();
continue;
}
};
// Setting a label_values variable here throws errors about moving borrowed
// content that I couldn't solve w/o clones. Leaving this for now. I'm sure
// there's a better way.
SWIM_BYTES_RECEIVED.with_label_values(&[msg.kind.as_str(), "success"])
.set(bytes_received.to_i64());
SWIM_MESSAGES_RECEIVED.with_label_values(&[msg.kind.as_str(), "success"])
.inc();
trace!("SWIM Message: {:?}", msg);
match msg.kind {
SwimKind::Ping(ping) => {
if server.is_member_blocked_sblr(&ping.from.id) {
debug!("Not processing message from {} - it is blocked",
ping.from.id);
continue;
}
process_ping_mlw_smw_rhw(server, socket, addr, ping);
}
SwimKind::Ack(ack) => {
if server.is_member_blocked_sblr(&ack.from.id) && ack.forward_to.is_none() {
debug!("Not processing message from {} - it is blocked",
ack.from.id);
continue;
}
process_ack_mlw_smw_rhw(server, socket, tx_outbound, addr, ack);
}
SwimKind::PingReq(pingreq) => {
if server.is_member_blocked_sblr(&pingreq.from.id) {
debug!("Not processing message from {} - it is blocked",
pingreq.from.id);
continue;
}
process_pingreq_mlr_smr_rhw(server, socket, addr, pingreq);
}
}
}
Err(e) => {
// TODO: We can't use magic numbers here because the Supervisor runs on more
// than one platform. I'm sure these were added as specific OS errors for Linux
// but we need to also handle Windows & Mac.
match e.raw_os_error() {
Some(35) | Some(11) | Some(10035) | Some(10060) => {
// This is the normal non-blocking result, or a timeout
}
Some(_) => {
error!("UDP Receive error: {}", e);
debug!("UDP Receive error debug: {:?}", e);
}
None => {
error!("UDP Receive error: {}", e);
}
}
}
}
}
}
/// Process pingreq messages.
///
/// # Locking (see locking.md)
/// * `MemberList::entries` (read)
/// * `Server::member` (read)
/// * `RumorHeat::inner` (write)
fn process_pingreq_mlr_smr_rhw(server: &Server,
socket: &UdpSocket,
addr: SocketAddr,
mut msg: PingReq) {
if let Some(target) = server.member_list.get_cloned_mlr(&msg.target.id) {
msg.from.address = addr.ip().to_string();
let ping_msg = Ping { membership: vec![],
from: server.myself.lock_smr().to_member(),
forward_to: Some(msg.from.clone()), };
let swim = outbound::populate_membership_rumors_mlr_rhw(server, &target, ping_msg);
// Set the route-back address to the one we received the
// pingreq from
outbound::ping(server,
socket,
target.swim_socket_address(),
Some(&msg.from),
&swim);
} else {
error!("PingReq request {:?} for invalid target", msg);
}
}
/// Process ack messages; forwards to the outbound thread.
///
/// # Locking (see locking.md)
/// * `MemberList::entries` (write)
/// * `Server::member` (write)
/// * `RumorHeat::inner` (write)
fn | (server: &Server,
socket: &UdpSocket,
tx_outbound: &AckSender,
addr: SocketAddr,
mut msg: Ack) {
trace!("Ack from {}@{}", msg.from.id, addr);
if msg.forward_to.is_some() && *server.member_id != msg.forward_to.as_ref().unwrap().id {
let (forward_to_addr, from_addr) = {
let forward_to = msg.forward_to.as_ref().unwrap();
let forward_addr_str = format!("{}:{}", forward_to.address, forward_to.swim_port);
let forward_to_addr = match forward_addr_str.parse() {
Ok(addr) => addr,
Err(e) => {
error!("Abandoning Ack forward: cannot parse member address: {}:{}, {}",
forward_to.address, forward_to.swim_port, e);
return;
}
};
trace!("Forwarding Ack from {}@{} to {}@{}",
msg.from.id,
addr,
forward_to.id,
forward_to.address,);
(forward_to_addr, addr.ip().to_string())
};
msg.from.address = from_addr;
outbound::forward_ack(server, socket, forward_to_addr, msg);
return;
}
let memberships = msg.membership.clone();
match tx_outbound.send((addr, msg)) {
Ok(()) => {
for membership in memberships {
server.insert_member_from_rumor_mlw_smw_rhw(membership.member, membership.health);
}
}
Err(e) => panic!("Outbound thread has died - this shouldn't happen: #{:?}", e),
}
}
/// # Locking (see locking.md)
/// * `MemberList::entries` (write)
/// * `Server::member` (write)
/// * `RumorHeat::inner` (write)
fn process_ping_mlw_smw_rhw(server: &Server, socket: &UdpSocket, addr: SocketAddr, mut msg: Ping) {
outbound::ack_mlr_smr_rhw(server, socket, &msg.from, addr, msg.forward_to);
// Populate the member for this sender with its remote address
msg.from.address = addr.ip().to_string();
trace!("Ping from {}@{}", msg.from.id, addr);
if msg.from.departed {
server.insert_member_mlw_rhw(msg.from, Health::Departed);
} else {
server.insert_member_mlw_rhw(msg.from, Health::Alive);
}
for membership in msg.membership {
| process_ack_mlw_smw_rhw | identifier_name |
inbound.rs | all the inbound SWIM messages.
use super::AckSender;
use crate::{member::Health,
server::{outbound,
Server},
swim::{Ack,
Ping,
PingReq,
Swim,
SwimKind}};
use habitat_common::liveliness_checker;
use habitat_core::util::ToI64;
use lazy_static::lazy_static;
use log::{debug,
error,
trace};
use prometheus::{register_int_counter_vec,
register_int_gauge_vec,
IntCounterVec,
IntGaugeVec};
use std::{net::{SocketAddr,
UdpSocket},
thread,
time::Duration};
lazy_static! {
static ref SWIM_MESSAGES_RECEIVED: IntCounterVec =
register_int_counter_vec!("hab_butterfly_swim_messages_received_total",
"Total number of SWIM messages received",
&["type", "mode"]).unwrap();
static ref SWIM_BYTES_RECEIVED: IntGaugeVec =
register_int_gauge_vec!("hab_butterfly_swim_received_bytes",
"SWIM message size received in bytes",
&["type", "mode"]).unwrap();
}
pub fn spawn_thread(name: String,
server: Server,
socket: UdpSocket,
tx_outbound: AckSender)
-> std::io::Result<()> {
thread::Builder::new().name(name)
.spawn(move || -> ! { run_loop(&server, &socket, &tx_outbound) })
.map(|_| ())
}
/// Run the thread. Listens for messages up to 1k in size, and then processes them accordingly.
/// Takes the Server and a channel to send received Acks to the outbound thread.
pub fn run_loop(server: &Server, socket: &UdpSocket, tx_outbound: &AckSender) -> ! {
let mut recv_buffer: Vec<u8> = vec![0; 1024];
loop {
liveliness_checker::mark_thread_alive().and_divergent();
if server.paused() |
match socket.recv_from(&mut recv_buffer[..]) {
Ok((length, addr)) => {
let swim_payload = match server.unwrap_wire(&recv_buffer[0..length]) {
Ok(swim_payload) => swim_payload,
Err(e) => {
// NOTE: In the future, we might want to block people who send us
// garbage all the time.
error!("Error unwrapping protocol message, {}", e);
let label_values = &["unwrap_wire", "failure"];
SWIM_BYTES_RECEIVED.with_label_values(label_values)
.set(length.to_i64());
SWIM_MESSAGES_RECEIVED.with_label_values(label_values).inc();
continue;
}
};
let bytes_received = swim_payload.len();
let msg = match Swim::decode(&swim_payload) {
Ok(msg) => msg,
Err(e) => {
// NOTE: In the future, we might want to block people who send us
// garbage all the time.
error!("Error decoding protocol message, {}", e);
let label_values = &["undecodable", "failure"];
SWIM_BYTES_RECEIVED.with_label_values(label_values)
.set(bytes_received.to_i64());
SWIM_MESSAGES_RECEIVED.with_label_values(label_values).inc();
continue;
}
};
// Setting a label_values variable here throws errors about moving borrowed
// content that I couldn't solve w/o clones. Leaving this for now. I'm sure
// there's a better way.
SWIM_BYTES_RECEIVED.with_label_values(&[msg.kind.as_str(), "success"])
.set(bytes_received.to_i64());
SWIM_MESSAGES_RECEIVED.with_label_values(&[msg.kind.as_str(), "success"])
.inc();
trace!("SWIM Message: {:?}", msg);
match msg.kind {
SwimKind::Ping(ping) => {
if server.is_member_blocked_sblr(&ping.from.id) {
debug!("Not processing message from {} - it is blocked",
ping.from.id);
continue;
}
process_ping_mlw_smw_rhw(server, socket, addr, ping);
}
SwimKind::Ack(ack) => {
if server.is_member_blocked_sblr(&ack.from.id) && ack.forward_to.is_none() {
debug!("Not processing message from {} - it is blocked",
ack.from.id);
continue;
}
process_ack_mlw_smw_rhw(server, socket, tx_outbound, addr, ack);
}
SwimKind::PingReq(pingreq) => {
if server.is_member_blocked_sblr(&pingreq.from.id) {
debug!("Not processing message from {} - it is blocked",
pingreq.from.id);
continue;
}
process_pingreq_mlr_smr_rhw(server, socket, addr, pingreq);
}
}
}
Err(e) => {
// TODO: We can't use magic numbers here because the Supervisor runs on more
// than one platform. I'm sure these were added as specific OS errors for Linux
// but we need to also handle Windows & Mac.
match e.raw_os_error() {
Some(35) | Some(11) | Some(10035) | Some(10060) => {
// This is the normal non-blocking result, or a timeout
}
Some(_) => {
error!("UDP Receive error: {}", e);
debug!("UDP Receive error debug: {:?}", e);
}
None => {
error!("UDP Receive error: {}", e);
}
}
}
}
}
}
/// Process pingreq messages.
///
/// # Locking (see locking.md)
/// * `MemberList::entries` (read)
/// * `Server::member` (read)
/// * `RumorHeat::inner` (write)
fn process_pingreq_mlr_smr_rhw(server: &Server,
socket: &UdpSocket,
addr: SocketAddr,
mut msg: PingReq) {
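// Another member could not reach `target` directly and asked us to probe it on its
// behalf (indirect ping); forward_to lets the resulting Ack find its way back to the
// original requester.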
if let Some(target) = server.member_list.get_cloned_mlr(&msg.target.id) {
msg.from.address = addr.ip().to_string();
let ping_msg = Ping { membership: vec![],
from: server.myself.lock_smr().to_member(),
forward_to: Some(msg.from.clone()), };
let swim = outbound::populate_membership_rumors_mlr_rhw(server, &target, ping_msg);
// Set the route-back address to the one we received the
// pingreq from
outbound::ping(server,
socket,
target.swim_socket_address(),
Some(&msg.from),
&swim);
} else {
error!("PingReq request {:?} for invalid target", msg);
}
}
/// Process ack messages; forwards to the outbound thread.
///
/// # Locking (see locking.md)
/// * `MemberList::entries` (write)
/// * `Server::member` (write)
/// * `RumorHeat::inner` (write)
fn process_ack_mlw_smw_rhw(server: &Server,
socket: &UdpSocket,
tx_outbound: &AckSender,
addr: SocketAddr,
mut msg: Ack) {
trace!("Ack from {}@{}", msg.from.id, addr);
if msg.forward_to.is_some() && *server.member_id != msg.forward_to.as_ref().unwrap().id {
let (forward_to_addr, from_addr) = {
let forward_to = msg.forward_to.as_ref().unwrap();
let forward_addr_str = format!("{}:{}", forward_to.address, forward_to.swim_port);
let forward_to_addr = match forward_addr_str.parse() {
Ok(addr) => addr,
Err(e) => {
error!("Abandoning Ack forward: cannot parse member address: {}:{}, {}",
forward_to.address, forward_to.swim_port, e);
return;
}
};
trace!("Forwarding Ack from {}@{} to {}@{}",
msg.from.id,
addr,
forward_to.id,
forward_to.address,);
(forward_to_addr, addr.ip().to_string())
};
msg.from.address = from_addr;
outbound::forward_ack(server, socket, forward_to_addr, msg);
return;
}
let memberships = msg.membership.clone();
match tx_outbound.send((addr, msg)) {
Ok(()) => {
for membership in memberships {
server.insert_member_from_rumor_mlw_smw_rhw(membership.member, membership.health);
}
}
Err(e) => panic!("Outbound thread has died - this shouldn't happen: #{:?}", e),
}
}
/// # Locking (see locking.md)
/// * `MemberList::entries` (write)
/// * `Server::member` (write)
/// * `RumorHeat::inner` (write)
fn process_ping_mlw_smw_rhw(server: &Server, socket: &UdpSocket, addr: SocketAddr, mut msg: Ping) {
outbound::ack_mlr_smr_rhw(server, socket, &msg.from, addr, msg.forward_to);
// Populate the member for this sender with its remote address
msg.from.address = addr.ip().to_string();
trace!("Ping from {}@{}", msg.from.id, addr);
if msg.from.departed {
server.insert_member_mlw_rhw(msg.from, Health::Departed);
} else {
server.insert_member_mlw_rhw(msg.from, Health::Alive);
}
for membership in msg.membership | {
thread::sleep(Duration::from_millis(100));
continue;
} | conditional_block |
inbound.rs | handles all the inbound SWIM messages.
use super::AckSender;
use crate::{member::Health,
server::{outbound,
Server},
swim::{Ack,
Ping,
PingReq,
Swim,
SwimKind}};
use habitat_common::liveliness_checker;
use habitat_core::util::ToI64;
use lazy_static::lazy_static;
use log::{debug,
error,
trace};
use prometheus::{register_int_counter_vec,
register_int_gauge_vec,
IntCounterVec,
IntGaugeVec};
use std::{net::{SocketAddr,
UdpSocket},
thread,
time::Duration};
lazy_static! {
static ref SWIM_MESSAGES_RECEIVED: IntCounterVec =
register_int_counter_vec!("hab_butterfly_swim_messages_received_total",
"Total number of SWIM messages received",
&["type", "mode"]).unwrap();
static ref SWIM_BYTES_RECEIVED: IntGaugeVec =
register_int_gauge_vec!("hab_butterfly_swim_received_bytes",
"SWIM message size received in bytes",
&["type", "mode"]).unwrap();
}
pub fn spawn_thread(name: String,
server: Server,
socket: UdpSocket,
tx_outbound: AckSender)
-> std::io::Result<()> {
thread::Builder::new().name(name)
.spawn(move || -> ! { run_loop(&server, &socket, &tx_outbound) })
.map(|_| ())
}
/// Run the thread. Listens for messages up to 1k in size, and then processes them accordingly.
/// Takes the Server and a channel to send received Acks to the outbound thread.
pub fn run_loop(server: &Server, socket: &UdpSocket, tx_outbound: &AckSender) -> ! {
let mut recv_buffer: Vec<u8> = vec![0; 1024];
loop {
liveliness_checker::mark_thread_alive().and_divergent();
if server.paused() {
thread::sleep(Duration::from_millis(100));
continue;
}
match socket.recv_from(&mut recv_buffer[..]) {
Ok((length, addr)) => {
let swim_payload = match server.unwrap_wire(&recv_buffer[0..length]) {
Ok(swim_payload) => swim_payload,
Err(e) => {
// NOTE: In the future, we might want to block people who send us
// garbage all the time.
error!("Error unwrapping protocol message, {}", e);
let label_values = &["unwrap_wire", "failure"];
SWIM_BYTES_RECEIVED.with_label_values(label_values)
.set(length.to_i64());
SWIM_MESSAGES_RECEIVED.with_label_values(label_values).inc();
continue;
}
};
let bytes_received = swim_payload.len();
let msg = match Swim::decode(&swim_payload) {
Ok(msg) => msg,
Err(e) => {
// NOTE: In the future, we might want to block people who send us
// garbage all the time.
error!("Error decoding protocol message, {}", e);
let label_values = &["undecodable", "failure"];
SWIM_BYTES_RECEIVED.with_label_values(label_values)
.set(bytes_received.to_i64());
SWIM_MESSAGES_RECEIVED.with_label_values(label_values).inc();
continue;
}
};
// Setting a label_values variable here throws errors about moving borrowed
// content that I couldn't solve w/o clones. Leaving this for now. I'm sure
// there's a better way.
SWIM_BYTES_RECEIVED.with_label_values(&[msg.kind.as_str(), "success"])
.set(bytes_received.to_i64());
SWIM_MESSAGES_RECEIVED.with_label_values(&[msg.kind.as_str(), "success"])
.inc();
trace!("SWIM Message: {:?}", msg);
match msg.kind {
SwimKind::Ping(ping) => {
if server.is_member_blocked_sblr(&ping.from.id) {
debug!("Not processing message from {} - it is blocked",
ping.from.id);
continue;
}
process_ping_mlw_smw_rhw(server, socket, addr, ping);
}
SwimKind::Ack(ack) => {
if server.is_member_blocked_sblr(&ack.from.id) && ack.forward_to.is_none() {
debug!("Not processing message from {} - it is blocked",
ack.from.id);
continue;
}
process_ack_mlw_smw_rhw(server, socket, tx_outbound, addr, ack);
}
SwimKind::PingReq(pingreq) => {
if server.is_member_blocked_sblr(&pingreq.from.id) {
debug!("Not processing message from {} - it is blocked",
pingreq.from.id);
continue;
}
process_pingreq_mlr_smr_rhw(server, socket, addr, pingreq);
}
}
}
Err(e) => {
// TODO: We can't use magic numbers here because the Supervisor runs on more
// than one platform. I'm sure these were added as specific OS errors for Linux
// but we need to also handle Windows & Mac.
match e.raw_os_error() {
Some(35) | Some(11) | Some(10035) | Some(10060) => {
// This is the normal non-blocking result, or a timeout
}
Some(_) => {
error!("UDP Receive error: {}", e);
debug!("UDP Receive error debug: {:?}", e);
}
None => {
error!("UDP Receive error: {}", e);
}
}
}
}
}
}
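// Editorial sketch, not part of the original source: the platform-specific codes
// matched above (35/11 are EAGAIN/EWOULDBLOCK, 10035/10060 are WSAEWOULDBLOCK/
// WSAETIMEDOUT) could be expressed portably through std::io::ErrorKind, which would
// address the TODO without enumerating per-OS errno values. Hypothetical helper:
#[allow(dead_code)]
fn is_expected_recv_error(e: &std::io::Error) -> bool {
    matches!(e.kind(),
             std::io::ErrorKind::WouldBlock | std::io::ErrorKind::TimedOut)
}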
/// Process pingreq messages.
///
/// # Locking (see locking.md)
/// * `MemberList::entries` (read)
/// * `Server::member` (read)
/// * `RumorHeat::inner` (write)
fn process_pingreq_mlr_smr_rhw(server: &Server,
socket: &UdpSocket,
addr: SocketAddr,
mut msg: PingReq) {
if let Some(target) = server.member_list.get_cloned_mlr(&msg.target.id) {
msg.from.address = addr.ip().to_string();
let ping_msg = Ping { membership: vec![],
from: server.myself.lock_smr().to_member(),
forward_to: Some(msg.from.clone()), };
let swim = outbound::populate_membership_rumors_mlr_rhw(server, &target, ping_msg);
// Set the route-back address to the one we received the
// pingreq from
outbound::ping(server,
socket,
target.swim_socket_address(),
Some(&msg.from),
&swim);
} else {
error!("PingReq request {:?} for invalid target", msg);
}
}
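// Editorial note: in SWIM, a PingReq asks this member to probe `target` on behalf of
// `from` (an indirect ping). The Ack that eventually comes back carries `forward_to`,
// which process_ack_mlw_smw_rhw below uses to relay the Ack to the original requester.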
/// Process ack messages; forwards to the outbound thread.
///
/// # Locking (see locking.md)
/// * `MemberList::entries` (write)
/// * `Server::member` (write)
/// * `RumorHeat::inner` (write) | addr: SocketAddr,
mut msg: Ack) {
trace!("Ack from {}@{}", msg.from.id, addr);
if msg.forward_to.is_some() && *server.member_id != msg.forward_to.as_ref().unwrap().id {
let (forward_to_addr, from_addr) = {
let forward_to = msg.forward_to.as_ref().unwrap();
let forward_addr_str = format!("{}:{}", forward_to.address, forward_to.swim_port);
let forward_to_addr = match forward_addr_str.parse() {
Ok(addr) => addr,
Err(e) => {
error!("Abandoning Ack forward: cannot parse member address: {}:{}, {}",
forward_to.address, forward_to.swim_port, e);
return;
}
};
trace!("Forwarding Ack from {}@{} to {}@{}",
msg.from.id,
addr,
forward_to.id,
forward_to.address,);
(forward_to_addr, addr.ip().to_string())
};
msg.from.address = from_addr;
outbound::forward_ack(server, socket, forward_to_addr, msg);
return;
}
let memberships = msg.membership.clone();
match tx_outbound.send((addr, msg)) {
Ok(()) => {
for membership in memberships {
server.insert_member_from_rumor_mlw_smw_rhw(membership.member, membership.health);
}
}
Err(e) => panic!("Outbound thread has died - this shouldn't happen: #{:?}", e),
}
}
/// # Locking (see locking.md)
/// * `MemberList::entries` (write)
/// * `Server::member` (write)
/// * `RumorHeat::inner` (write)
fn process_ping_mlw_smw_rhw(server: &Server, socket: &UdpSocket, addr: SocketAddr, mut msg: Ping) {
outbound::ack_mlr_smr_rhw(server, socket, &msg.from, addr, msg.forward_to);
// Populate the member for this sender with its remote address
msg.from.address = addr.ip().to_string();
trace!("Ping from {}@{}", msg.from.id, addr);
if msg.from.departed {
server.insert_member_mlw_rhw(msg.from, Health::Departed);
} else {
server.insert_member_mlw_rhw(msg.from, Health::Alive);
}
for membership in msg.membership {
| fn process_ack_mlw_smw_rhw(server: &Server,
socket: &UdpSocket,
tx_outbound: &AckSender, | random_line_split |
inbound.rs | bound,
Server},
swim::{Ack,
Ping,
PingReq,
Swim,
SwimKind}};
use habitat_common::liveliness_checker;
use habitat_core::util::ToI64;
use lazy_static::lazy_static;
use log::{debug,
error,
trace};
use prometheus::{register_int_counter_vec,
register_int_gauge_vec,
IntCounterVec,
IntGaugeVec};
use std::{net::{SocketAddr,
UdpSocket},
thread,
time::Duration};
lazy_static! {
static ref SWIM_MESSAGES_RECEIVED: IntCounterVec =
register_int_counter_vec!("hab_butterfly_swim_messages_received_total",
"Total number of SWIM messages received",
&["type", "mode"]).unwrap();
static ref SWIM_BYTES_RECEIVED: IntGaugeVec =
register_int_gauge_vec!("hab_butterfly_swim_received_bytes",
"SWIM message size received in bytes",
&["type", "mode"]).unwrap();
}
pub fn spawn_thread(name: String,
server: Server,
socket: UdpSocket,
tx_outbound: AckSender)
-> std::io::Result<()> {
thread::Builder::new().name(name)
.spawn(move || -> ! { run_loop(&server, &socket, &tx_outbound) })
.map(|_| ())
}
/// Run the thread. Listens for messages up to 1k in size, and then processes them accordingly.
/// Takes the Server and a channel to send received Acks to the outbound thread.
pub fn run_loop(server: &Server, socket: &UdpSocket, tx_outbound: &AckSender) -> ! {
let mut recv_buffer: Vec<u8> = vec![0; 1024];
loop {
liveliness_checker::mark_thread_alive().and_divergent();
if server.paused() {
thread::sleep(Duration::from_millis(100));
continue;
}
match socket.recv_from(&mut recv_buffer[..]) {
Ok((length, addr)) => {
let swim_payload = match server.unwrap_wire(&recv_buffer[0..length]) {
Ok(swim_payload) => swim_payload,
Err(e) => {
// NOTE: In the future, we might want to block people who send us
// garbage all the time.
error!("Error unwrapping protocol message, {}", e);
let label_values = &["unwrap_wire", "failure"];
SWIM_BYTES_RECEIVED.with_label_values(label_values)
.set(length.to_i64());
SWIM_MESSAGES_RECEIVED.with_label_values(label_values).inc();
continue;
}
};
let bytes_received = swim_payload.len();
let msg = match Swim::decode(&swim_payload) {
Ok(msg) => msg,
Err(e) => {
// NOTE: In the future, we might want to block people who send us
// garbage all the time.
error!("Error decoding protocol message, {}", e);
let label_values = &["undecodable", "failure"];
SWIM_BYTES_RECEIVED.with_label_values(label_values)
.set(bytes_received.to_i64());
SWIM_MESSAGES_RECEIVED.with_label_values(label_values).inc();
continue;
}
};
// Setting a label_values variable here throws errors about moving borrowed
// content that I couldn't solve w/o clones. Leaving this for now. I'm sure
// there's a better way.
SWIM_BYTES_RECEIVED.with_label_values(&[msg.kind.as_str(), "success"])
.set(bytes_received.to_i64());
SWIM_MESSAGES_RECEIVED.with_label_values(&[msg.kind.as_str(), "success"])
.inc();
trace!("SWIM Message: {:?}", msg);
match msg.kind {
SwimKind::Ping(ping) => {
if server.is_member_blocked_sblr(&ping.from.id) {
debug!("Not processing message from {} - it is blocked",
ping.from.id);
continue;
}
process_ping_mlw_smw_rhw(server, socket, addr, ping);
}
SwimKind::Ack(ack) => {
if server.is_member_blocked_sblr(&ack.from.id) && ack.forward_to.is_none() {
debug!("Not processing message from {} - it is blocked",
ack.from.id);
continue;
}
process_ack_mlw_smw_rhw(server, socket, tx_outbound, addr, ack);
}
SwimKind::PingReq(pingreq) => {
if server.is_member_blocked_sblr(&pingreq.from.id) {
debug!("Not processing message from {} - it is blocked",
pingreq.from.id);
continue;
}
process_pingreq_mlr_smr_rhw(server, socket, addr, pingreq);
}
}
}
Err(e) => {
// TODO: We can't use magic numbers here because the Supervisor runs on more
// than one platform. I'm sure these were added as specific OS errors for Linux
// but we need to also handle Windows & Mac.
match e.raw_os_error() {
Some(35) | Some(11) | Some(10035) | Some(10060) => {
// This is the normal non-blocking result, or a timeout
}
Some(_) => {
error!("UDP Receive error: {}", e);
debug!("UDP Receive error debug: {:?}", e);
}
None => {
error!("UDP Receive error: {}", e);
}
}
}
}
}
}
/// Process pingreq messages.
///
/// # Locking (see locking.md)
/// * `MemberList::entries` (read)
/// * `Server::member` (read)
/// * `RumorHeat::inner` (write)
fn process_pingreq_mlr_smr_rhw(server: &Server,
socket: &UdpSocket,
addr: SocketAddr,
mut msg: PingReq) {
if let Some(target) = server.member_list.get_cloned_mlr(&msg.target.id) {
msg.from.address = addr.ip().to_string();
let ping_msg = Ping { membership: vec![],
from: server.myself.lock_smr().to_member(),
forward_to: Some(msg.from.clone()), };
let swim = outbound::populate_membership_rumors_mlr_rhw(server, &target, ping_msg);
// Set the route-back address to the one we received the
// pingreq from
outbound::ping(server,
socket,
target.swim_socket_address(),
Some(&msg.from),
&swim);
} else {
error!("PingReq request {:?} for invalid target", msg);
}
}
/// Process ack messages; forwards to the outbound thread.
///
/// # Locking (see locking.md)
/// * `MemberList::entries` (write)
/// * `Server::member` (write)
/// * `RumorHeat::inner` (write)
fn process_ack_mlw_smw_rhw(server: &Server,
socket: &UdpSocket,
tx_outbound: &AckSender,
addr: SocketAddr,
mut msg: Ack) {
trace!("Ack from {}@{}", msg.from.id, addr);
if msg.forward_to.is_some() && *server.member_id != msg.forward_to.as_ref().unwrap().id {
let (forward_to_addr, from_addr) = {
let forward_to = msg.forward_to.as_ref().unwrap();
let forward_addr_str = format!("{}:{}", forward_to.address, forward_to.swim_port);
let forward_to_addr = match forward_addr_str.parse() {
Ok(addr) => addr,
Err(e) => {
error!("Abandoning Ack forward: cannot parse member address: {}:{}, {}",
forward_to.address, forward_to.swim_port, e);
return;
}
};
trace!("Forwarding Ack from {}@{} to {}@{}",
msg.from.id,
addr,
forward_to.id,
forward_to.address,);
(forward_to_addr, addr.ip().to_string())
};
msg.from.address = from_addr;
outbound::forward_ack(server, socket, forward_to_addr, msg);
return;
}
let memberships = msg.membership.clone();
match tx_outbound.send((addr, msg)) {
Ok(()) => {
for membership in memberships {
server.insert_member_from_rumor_mlw_smw_rhw(membership.member, membership.health);
}
}
Err(e) => panic!("Outbound thread has died - this shouldn't happen: #{:?}", e),
}
}
/// # Locking (see locking.md)
/// * `MemberList::entries` (write)
/// * `Server::member` (write)
/// * `RumorHeat::inner` (write)
fn process_ping_mlw_smw_rhw(server: &Server, socket: &UdpSocket, addr: SocketAddr, mut msg: Ping) | {
outbound::ack_mlr_smr_rhw(server, socket, &msg.from, addr, msg.forward_to);
// Populate the member for this sender with its remote address
msg.from.address = addr.ip().to_string();
trace!("Ping from {}@{}", msg.from.id, addr);
if msg.from.departed {
server.insert_member_mlw_rhw(msg.from, Health::Departed);
} else {
server.insert_member_mlw_rhw(msg.from, Health::Alive);
}
for membership in msg.membership {
server.insert_member_from_rumor_mlw_smw_rhw(membership.member, membership.health);
}
} | identifier_body |
|
sandbox_second.py | (mat):
empty_array = np.full((30, 30), 0, dtype=np.float32)
if(len(mat) != 0):
mat = np.asarray(mat, dtype=np.float32)
empty_array[:mat.shape[0], : mat.shape[1]] = mat
return np.expand_dims(empty_array, axis= 2)
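# Editorial usage sketch (not in the original notebook): the helper zero-pads an
# arbitrary grid into a fixed 30x30x1 tensor, e.g.
#   enhance_mat_30x30([[1, 2], [3, 4]]).shape  # -> (30, 30, 1)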
#%%
train_input = []
cnt = 0
# use all the tasks in train
# use input and output
for task in train_data:
for sample in task['train']:
_input = sample['input']
_output = sample['output']
if len(_input) > 1:
train_input.append(_input)
else:
cnt += 1
if len(_output) > 1:
train_input.append(_output)
else:
cnt += 1
print('Thrown away samples: ')
print(cnt)
print('Total pretrain samples: ')
print(len(train_input))
#%%
# generate all the pretrain data
# 1. modified output tasks
# 2. y_labels
PRETRAIN_FUNCTIONS = [
pretrain.rotate_tasks,
pretrain.multiply_tasks,
pretrain.change_random_color_tasks,
pretrain.remove_line_tasks,
pretrain.shift_line_tasks,
pretrain.multiply_rotation_tasks,
pretrain.mirror_tasks,
#pretrain.double_line_with_multiple_colors_tasks,
]
train_output, y_labels, y_train_row_len, y_train_col_len = pretrain_generator.generate_pretrain_data(PRETRAIN_FUNCTIONS, train_input)
_train_input = [enhance_mat_30x30(task) for task in train_input]
train_input = []
for i in range(len(PRETRAIN_FUNCTIONS)):
train_input = train_input + _train_input
train_input = [np.array(el) for el in train_input]
train_output = [np.array(el) for el in train_output]
print("LEN train_input")
print(len(train_input))
print("LEN train_output")
print(len(train_output))
print("LEN y_labels")
print(len(y_labels[0]))
print("LEN y_train_row_len")
print(len(y_train_row_len))
print("LEN y_train_col_len")
print(len(y_train_col_len))
#%%
len(train_input)
len(train_output)
len(test_input)
#%%
# Raphael Model
# input
input_ = Input(shape=(30, 30, 1), name='train_input')
output_ = Input(shape=(30, 30, 1), name='train_output')
# convolution layers
x_1 = Conv2D(64, (3, 3), activation='relu')(input_)
x_1 = MaxPooling2D(pool_size=(2, 2))(x_1)
x_1 = Dropout(0.25)(x_1)
x_1 = Flatten()(x_1)
x_2 = Conv2D(64, (3, 3), activation='relu')(output_)
x_2 = MaxPooling2D(pool_size=(2, 2))(x_2)
x_2 = Dropout(0.25)(x_2)
x_2 = Flatten()(x_2)
merge = concatenate([x_1, x_2])
merge = Dense(128, activation='relu')(merge)
merge = Dense(128, activation='relu')(merge)
merge = Dropout(0.3)(merge)
# pretrain.mirror_tasks,
# pretrain.double_line_with_multiple_colors_tasks,
# regression layers
out_1 = Dense(128, activation='relu')(merge)
out_1 = Dense(1, activation='linear', name='fzn_rows')(out_1)
out_2 = Dense(128, activation='relu')(merge)
out_2 = Dense(1, activation='linear', name='fzn_cols')(out_2)
out_9 = Dense(128, activation='relu')(merge)
out_9 = Dense(1, activation='linear', name='fzn_removed_line_nr')(out_9)
out_11 = Dense(128, activation='relu')(merge)
out_11 = Dense(1, activation='linear', name='fzn_shifted_line_nr')(out_11)
# out_15 = Dense(128, activation='relu')(merge)
# out_15 = Dense(1, activation='linear', name='doubled_line_nr')(out_15)
# multi-label classification layers
out_4 = Dense(128, activation='relu')(merge)
out_4 = Dense(4, activation='sigmoid', name='fzn_rotation_angle')(out_4)
out_5 = Dense(128, activation='relu')(merge)
out_5 = Dense(3, activation='sigmoid', name='fzn_multiply_factor')(out_5)
out_6 = Dense(128, activation='relu')(merge)
out_6 = Dense(10, activation='sigmoid', name='fzn_changed_color_old')(out_6)
out_7 = Dense(128, activation='relu')(merge)
out_7 = Dense(10, activation='sigmoid', name='fzn_changed_color_new')(out_7)
out_8 = Dense(128, activation='relu')(merge)
out_8 = Dense(3, activation='sigmoid', name='fzn_removed_row_or_column')(out_8)
out_10 = Dense(128, activation='relu')(merge)
out_10 = Dense(3, activation='sigmoid', name='fzn_shifted_row_or_column')(out_10)
out_12 = Dense(128, activation='relu')(merge)
out_12 = Dense(3, activation='sigmoid', name='fzn_multiply_rotation_factor')(out_12)
out_13 = Dense(128, activation='relu')(merge)
out_13 = Dense(3, activation='sigmoid', name='fzn_multiply_mirror_factor')(out_13)
# out_14 = Dense(128, activation='relu')(merge)
# out_14 = Dense(3, activation='sigmoid', name='doubled_row_or_column')(out_14)
model = Model(inputs=[input_, output_], outputs=[
out_1, out_2, out_4,
out_5, out_6, out_7,
out_8, out_9, out_10,
out_11, out_12, out_13,
# out_14, out_15
])
opt = Adam(lr=1e-3, decay=1e-3)
losses = {
"fzn_rows": "mean_absolute_error",
"fzn_cols": "mean_absolute_error",
"fzn_removed_line_nr": "mean_absolute_error",
"fzn_shifted_line_nr": "mean_absolute_error",
"fzn_rotation_angle": "binary_crossentropy",
"fzn_multiply_factor": "binary_crossentropy",
"fzn_changed_color_old": "binary_crossentropy",
"fzn_changed_color_new": "binary_crossentropy",
"fzn_removed_row_or_column": "binary_crossentropy",
"fzn_shifted_row_or_column": "binary_crossentropy",
"fzn_multiply_rotation_factor": "binary_crossentropy",
"fzn_multiply_mirror_factor": "binary_crossentropy",
# "doubled_row_or_column": "binary_crossentropy",
# "doubled_line_nr": "mean_absolute_error"
}
model.compile(loss=losses, optimizer=opt)
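# Editorial aside (hedged, not in the original notebook): Keras can also weight the
# regression vs. classification heads via per-output loss weights keyed by the same
# output names, e.g.
#   model.compile(loss=losses, optimizer=opt,
#                 loss_weights={name: 1.0 for name in losses})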
#%%
#%%
# Train the model
start = time.time()
history = model.fit(
[
np.array(train_input),
np.array(train_output)
],
[
np.array(y_train_col_len),
np.array(y_train_row_len),
np.array(to_categorical(y_labels[0])),
np.array(to_categorical(y_labels[1])),
np.array(to_categorical(y_labels[2])),
np.array(to_categorical(y_labels[3])),
np.array(to_categorical(y_labels[4])),
np.array(y_labels[5]),
np.array(to_categorical(y_labels[6])),
np.array(y_labels[7]),
np.array(to_categorical(y_labels[8])),
np.array(to_categorical(y_labels[9])),
],
epochs=100)
print('training time {} minutes'.format(round((time.time()-start)/60)))
#%%
log = Logger('2_input_12_pretrain')
log.save_experiment(model, history)
#%%
plt.plot_loss(log, 'loss', save=True)
#%%
log = Logger('2_input_12_pretrain')
model = log.load_experiment()
model.summary()
#%%
"""
path_frozen = 'data/weights_frozen/{}'.format(log.name)
if not os.path.exists(path_frozen):
os.mkdir(path_frozen)
model.save(path_frozen)
"""
#%%
for layer in model.layers[:-12]:
layer.trainable = False
#model.layers.pop()
model.summary()
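# Editorial note: everything except the last 12 layers is frozen above, so only those
# head layers remain trainable when this pretrained model is embedded in the task
# model built below.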
#%%
# predict whole task model
input_test_task = Input(shape=(30, 30, 1), name='test_input')
input_frozen_model = concatenate(model.output, name='concat_frozen_layers')
# convolution layers
x_test = Conv2D(64, (3, 3), activation='relu', name='test_convolution')(input_test_task)
x_test = MaxPooling2D(pool_size=(2, 2), name='test_pooling')(x_test)
x_test = Dropout(0.25, name | enhance_mat_30x30 | identifier_name |
|
sandbox_second.py | in train
# use input and output
for task in train_data:
for sample in task['train']:
_input = sample['input']
_output = sample['output']
if len(_input) > 1:
train_input.append(_input)
else:
cnt += 1
if len(_output) > 1:
train_input.append(_output)
else:
cnt += 1
print('Thrown away samples: ')
print(cnt)
print('Total pretrain samples: ')
print(len(train_input))
#%%
# generate all the pretrain data
# 1. modified output tasks
# 2. y_labels
PRETRAIN_FUNCTIONS = [
pretrain.rotate_tasks,
pretrain.multiply_tasks,
pretrain.change_random_color_tasks,
pretrain.remove_line_tasks,
pretrain.shift_line_tasks,
pretrain.multiply_rotation_tasks,
pretrain.mirror_tasks,
#pretrain.double_line_with_multiple_colors_tasks,
]
train_output, y_labels, y_train_row_len, y_train_col_len = pretrain_generator.generate_pretrain_data(PRETRAIN_FUNCTIONS, train_input)
_train_input = [enhance_mat_30x30(task) for task in train_input]
train_input = []
for i in range(len(PRETRAIN_FUNCTIONS)):
train_input = train_input + _train_input
train_input = [np.array(el) for el in train_input]
train_output = [np.array(el) for el in train_output]
print("LEN train_input")
print(len(train_input))
print("LEN train_output")
print(len(train_output))
print("LEN y_labels")
print(len(y_labels[0]))
print("LEN y_train_row_len")
print(len(y_train_row_len))
print("LEN y_train_col_len")
print(len(y_train_col_len))
#%%
len(train_input)
len(train_output)
len(test_input)
#%%
# Raphael Model
# input
input_ = Input(shape=(30, 30, 1), name='train_input')
output_ = Input(shape=(30, 30, 1), name='train_output')
# convolution layers
x_1 = Conv2D(64, (3, 3), activation='relu')(input_)
x_1 = MaxPooling2D(pool_size=(2, 2))(x_1)
x_1 = Dropout(0.25)(x_1)
x_1 = Flatten()(x_1)
x_2 = Conv2D(64, (3, 3), activation='relu')(output_) |
merge = Dense(128, activation='relu')(merge)
merge = Dense(128, activation='relu')(merge)
merge = Dropout(0.3)(merge)
# pretrain.mirror_tasks,
# pretrain.double_line_with_multiple_colors_tasks,
# regression layers
out_1 = Dense(128, activation='relu')(merge)
out_1 = Dense(1, activation='linear', name='fzn_rows')(out_1)
out_2 = Dense(128, activation='relu')(merge)
out_2 = Dense(1, activation='linear', name='fzn_cols')(out_2)
out_9 = Dense(128, activation='relu')(merge)
out_9 = Dense(1, activation='linear', name='fzn_removed_line_nr')(out_9)
out_11 = Dense(128, activation='relu')(merge)
out_11 = Dense(1, activation='linear', name='fzn_shifted_line_nr')(out_11)
# out_15 = Dense(128, activation='relu')(merge)
# out_15 = Dense(1, activation='linear', name='doubled_line_nr')(out_15)
# multi-label classification layers
out_4 = Dense(128, activation='relu')(merge)
out_4 = Dense(4, activation='sigmoid', name='fzn_rotation_angle')(out_4)
out_5 = Dense(128, activation='relu')(merge)
out_5 = Dense(3, activation='sigmoid', name='fzn_multiply_factor')(out_5)
out_6 = Dense(128, activation='relu')(merge)
out_6 = Dense(10, activation='sigmoid', name='fzn_changed_color_old')(out_6)
out_7 = Dense(128, activation='relu')(merge)
out_7 = Dense(10, activation='sigmoid', name='fzn_changed_color_new')(out_7)
out_8 = Dense(128, activation='relu')(merge)
out_8 = Dense(3, activation='sigmoid', name='fzn_removed_row_or_column')(out_8)
out_10 = Dense(128, activation='relu')(merge)
out_10 = Dense(3, activation='sigmoid', name='fzn_shifted_row_or_column')(out_10)
out_12 = Dense(128, activation='relu')(merge)
out_12 = Dense(3, activation='sigmoid', name='fzn_multiply_rotation_factor')(out_12)
out_13 = Dense(128, activation='relu')(merge)
out_13 = Dense(3, activation='sigmoid', name='fzn_multiply_mirror_factor')(out_13)
# out_14 = Dense(128, activation='relu')(merge)
# out_14 = Dense(3, activation='sigmoid', name='doubled_row_or_column')(out_14)
model = Model(inputs=[input_, output_], outputs=[
out_1, out_2, out_4,
out_5, out_6, out_7,
out_8, out_9, out_10,
out_11, out_12, out_13,
# out_14, out_15
])
opt = Adam(lr=1e-3, decay=1e-3)
losses = {
"fzn_rows": "mean_absolute_error",
"fzn_cols": "mean_absolute_error",
"fzn_removed_line_nr": "mean_absolute_error",
"fzn_shifted_line_nr": "mean_absolute_error",
"fzn_rotation_angle": "binary_crossentropy",
"fzn_multiply_factor": "binary_crossentropy",
"fzn_changed_color_old": "binary_crossentropy",
"fzn_changed_color_new": "binary_crossentropy",
"fzn_removed_row_or_column": "binary_crossentropy",
"fzn_shifted_row_or_column": "binary_crossentropy",
"fzn_multiply_rotation_factor": "binary_crossentropy",
"fzn_multiply_mirror_factor": "binary_crossentropy",
# "doubled_row_or_column": "binary_crossentropy",
# "doubled_line_nr": "mean_absolute_error"
}
model.compile(loss=losses, optimizer=opt)
#%%
#%%
# Train the model
start = time.time()
history = model.fit(
[
np.array(train_input),
np.array(train_output)
],
[
np.array(y_train_col_len),
np.array(y_train_row_len),
np.array(to_categorical(y_labels[0])),
np.array(to_categorical(y_labels[1])),
np.array(to_categorical(y_labels[2])),
np.array(to_categorical(y_labels[3])),
np.array(to_categorical(y_labels[4])),
np.array(y_labels[5]),
np.array(to_categorical(y_labels[6])),
np.array(y_labels[7]),
np.array(to_categorical(y_labels[8])),
np.array(to_categorical(y_labels[9])),
],
epochs=100)
print('training time {} minutes'.format(round((time.time()-start)/60)))
#%%
log = Logger('2_input_12_pretrain')
log.save_experiment(model, history)
#%%
plt.plot_loss(log, 'loss', save=True)
#%%
log = Logger('2_input_12_pretrain')
model = log.load_experiment()
model.summary()
#%%
"""
path_frozen = 'data/weights_frozen/{}'.format(log.name)
if not os.path.exists(path_frozen):
os.mkdir(path_frozen)
model.save(path_frozen)
"""
#%%
for layer in model.layers[:-12]:
layer.trainable = False
#model.layers.pop()
model.summary()
#%%
# predict whole task model
input_test_task = Input(shape=(30, 30, 1), name='test_input')
input_frozen_model = concatenate(model.output, name='concat_frozen_layers')
# convolution layers
x_test = Conv2D(64, (3, 3), activation='relu', name='test_convolution')(input_test_task)
x_test = MaxPooling2D(pool_size=(2, 2), name='test_pooling')(x_test)
x_test = Dropout(0.25, name='test_dropout')(x_test)
x_test = Flatten(name='test_flatten')(x_test)
# merge frozen layers
merge_frozen = concatenate([
x_test,
input_frozen_model
], name='concat_test_frozen')
# out layers
out_final = Dense(128, activation='relu', name='test_out_dense_1')(merge_frozen)
out_final = Dense(128, activation='relu', name='test_out_dense_2')(out_final | x_2 = MaxPooling2D(pool_size=(2, 2))(x_2)
x_2 = Dropout(0.25)(x_2)
x_2 = Flatten()(x_2)
merge = concatenate([x_1, x_2]) | random_line_split |
sandbox_second.py | tasks in train
# use input and output
for task in train_data:
for sample in task['train']:
|
print('Thrown away samples: ')
print(cnt)
print('Total pretrain samples: ')
print(len(train_input))
#%%
# generate all the pretrain data
# 1. modified output tasks
# 2. y_labels
PRETRAIN_FUNCTIONS = [
pretrain.rotate_tasks,
pretrain.multiply_tasks,
pretrain.change_random_color_tasks,
pretrain.remove_line_tasks,
pretrain.shift_line_tasks,
pretrain.multiply_rotation_tasks,
pretrain.mirror_tasks,
#pretrain.double_line_with_multiple_colors_tasks,
]
train_output, y_labels, y_train_row_len, y_train_col_len = pretrain_generator.generate_pretrain_data(PRETRAIN_FUNCTIONS, train_input)
_train_input = [enhance_mat_30x30(task) for task in train_input]
train_input = []
for i in range(len(PRETRAIN_FUNCTIONS)):
train_input = train_input + _train_input
train_input = [np.array(el) for el in train_input]
train_output = [np.array(el) for el in train_output]
print("LEN train_input")
print(len(train_input))
print("LEN train_output")
print(len(train_output))
print("LEN y_labels")
print(len(y_labels[0]))
print("LEN y_train_row_len")
print(len(y_train_row_len))
print("LEN y_train_col_len")
print(len(y_train_col_len))
#%%
len(train_input)
len(train_output)
len(test_input)
#%%
# Raphael Model
# input
input_ = Input(shape=(30, 30, 1), name='train_input')
output_ = Input(shape=(30, 30, 1), name='train_output')
# convolution layers
x_1 = Conv2D(64, (3, 3), activation='relu')(input_)
x_1 = MaxPooling2D(pool_size=(2, 2))(x_1)
x_1 = Dropout(0.25)(x_1)
x_1 = Flatten()(x_1)
x_2 = Conv2D(64, (3, 3), activation='relu')(output_)
x_2 = MaxPooling2D(pool_size=(2, 2))(x_2)
x_2 = Dropout(0.25)(x_2)
x_2 = Flatten()(x_2)
merge = concatenate([x_1, x_2])
merge = Dense(128, activation='relu')(merge)
merge = Dense(128, activation='relu')(merge)
merge = Dropout(0.3)(merge)
# pretrain.mirror_tasks,
# pretrain.double_line_with_multiple_colors_tasks,
# regression layers
out_1 = Dense(128, activation='relu')(merge)
out_1 = Dense(1, activation='linear', name='fzn_rows')(out_1)
out_2 = Dense(128, activation='relu')(merge)
out_2 = Dense(1, activation='linear', name='fzn_cols')(out_2)
out_9 = Dense(128, activation='relu')(merge)
out_9 = Dense(1, activation='linear', name='fzn_removed_line_nr')(out_9)
out_11 = Dense(128, activation='relu')(merge)
out_11 = Dense(1, activation='linear', name='fzn_shifted_line_nr')(out_11)
# out_15 = Dense(128, activation='relu')(merge)
# out_15 = Dense(1, activation='linear', name='doubled_line_nr')(out_15)
# multi-label classification layers
out_4 = Dense(128, activation='relu')(merge)
out_4 = Dense(4, activation='sigmoid', name='fzn_rotation_angle')(out_4)
out_5 = Dense(128, activation='relu')(merge)
out_5 = Dense(3, activation='sigmoid', name='fzn_multiply_factor')(out_5)
out_6 = Dense(128, activation='relu')(merge)
out_6 = Dense(10, activation='sigmoid', name='fzn_changed_color_old')(out_6)
out_7 = Dense(128, activation='relu')(merge)
out_7 = Dense(10, activation='sigmoid', name='fzn_changed_color_new')(out_7)
out_8 = Dense(128, activation='relu')(merge)
out_8 = Dense(3, activation='sigmoid', name='fzn_removed_row_or_column')(out_8)
out_10 = Dense(128, activation='relu')(merge)
out_10 = Dense(3, activation='sigmoid', name='fzn_shifted_row_or_column')(out_10)
out_12 = Dense(128, activation='relu')(merge)
out_12 = Dense(3, activation='sigmoid', name='fzn_multiply_rotation_factor')(out_12)
out_13 = Dense(128, activation='relu')(merge)
out_13 = Dense(3, activation='sigmoid', name='fzn_multiply_mirror_factor')(out_13)
# out_14 = Dense(128, activation='relu')(merge)
# out_14 = Dense(3, activation='sigmoid', name='doubled_row_or_column')(out_14)
model = Model(inputs=[input_, output_], outputs=[
out_1, out_2, out_4,
out_5, out_6, out_7,
out_8, out_9, out_10,
out_11, out_12, out_13,
# out_14, out_15
])
opt = Adam(lr=1e-3, decay=1e-3)
losses = {
"fzn_rows": "mean_absolute_error",
"fzn_cols": "mean_absolute_error",
"fzn_removed_line_nr": "mean_absolute_error",
"fzn_shifted_line_nr": "mean_absolute_error",
"fzn_rotation_angle": "binary_crossentropy",
"fzn_multiply_factor": "binary_crossentropy",
"fzn_changed_color_old": "binary_crossentropy",
"fzn_changed_color_new": "binary_crossentropy",
"fzn_removed_row_or_column": "binary_crossentropy",
"fzn_shifted_row_or_column": "binary_crossentropy",
"fzn_multiply_rotation_factor": "binary_crossentropy",
"fzn_multiply_mirror_factor": "binary_crossentropy",
# "doubled_row_or_column": "binary_crossentropy",
# "doubled_line_nr": "mean_absolute_error"
}
model.compile(loss=losses, optimizer=opt)
#%%
#%%
# Train the model
start = time.time()
history = model.fit(
[
np.array(train_input),
np.array(train_output)
],
[
np.array(y_train_col_len),
np.array(y_train_row_len),
np.array(to_categorical(y_labels[0])),
np.array(to_categorical(y_labels[1])),
np.array(to_categorical(y_labels[2])),
np.array(to_categorical(y_labels[3])),
np.array(to_categorical(y_labels[4])),
np.array(y_labels[5]),
np.array(to_categorical(y_labels[6])),
np.array(y_labels[7]),
np.array(to_categorical(y_labels[8])),
np.array(to_categorical(y_labels[9])),
],
epochs=100)
print('training time {} minutes'.format(round((time.time()-start)/60)))
#%%
log = Logger('2_input_12_pretrain')
log.save_experiment(model, history)
#%%
plt.plot_loss(log, 'loss', save=True)
#%%
log = Logger('2_input_12_pretrain')
model = log.load_experiment()
model.summary()
#%%
"""
path_frozen = 'data/weights_frozen/{}'.format(log.name)
if not os.path.exists(path_frozen):
os.mkdir(path_frozen)
model.save(path_frozen)
"""
#%%
for layer in model.layers[:-12]:
layer.trainable = False
#model.layers.pop()
model.summary()
#%%
# predict whole task model
input_test_task = Input(shape=(30, 30, 1), name='test_input')
input_frozen_model = concatenate(model.output, name='concat_frozen_layers')
# convolution layers
x_test = Conv2D(64, (3, 3), activation='relu', name='test_convolution')(input_test_task)
x_test = MaxPooling2D(pool_size=(2, 2), name='test_pooling')(x_test)
x_test = Dropout(0.25, name='test_dropout')(x_test)
x_test = Flatten(name='test_flatten')(x_test)
# merge frozen layers
merge_frozen = concatenate([
x_test,
input_frozen_model
], name='concat_test_frozen')
# out layers
out_final = Dense(128, activation='relu', name='test_out_dense_1')(merge_frozen)
out_final = Dense(128, activation='relu', name='test_out_dense_2')(out_final)
| _input = sample['input']
_output = sample['output']
if len(_input) > 1:
train_input.append(_input)
else:
cnt += 1
if len(_output) > 1:
train_input.append(_output)
else:
cnt += 1 | conditional_block |
sandbox_second.py |
#%%
train_input = []
cnt = 0
# use all the tasks in train
# use input and output
for task in train_data:
for sample in task['train']:
_input = sample['input']
_output = sample['output']
if len(_input) > 1:
train_input.append(_input)
else:
cnt += 1
if len(_output) > 1:
train_input.append(_output)
else:
cnt += 1
print('Thrown away samples: ')
print(cnt)
print('Total pretrain samples: ')
print(len(train_input))
#%%
# generate all the pretrain data
# 1. modified output tasks
# 2. y_labels
PRETRAIN_FUNCTIONS = [
pretrain.rotate_tasks,
pretrain.multiply_tasks,
pretrain.change_random_color_tasks,
pretrain.remove_line_tasks,
pretrain.shift_line_tasks,
pretrain.multiply_rotation_tasks,
pretrain.mirror_tasks,
#pretrain.double_line_with_multiple_colors_tasks,
]
train_output, y_labels, y_train_row_len, y_train_col_len = pretrain_generator.generate_pretrain_data(PRETRAIN_FUNCTIONS, train_input)
_train_input = [enhance_mat_30x30(task) for task in train_input]
train_input = []
for i in range(len(PRETRAIN_FUNCTIONS)):
train_input = train_input + _train_input
train_input = [np.array(el) for el in train_input]
train_output = [np.array(el) for el in train_output]
print("LEN train_input")
print(len(train_input))
print("LEN train_output")
print(len(train_output))
print("LEN y_labels")
print(len(y_labels[0]))
print("LEN y_train_row_len")
print(len(y_train_row_len))
print("LEN y_train_col_len")
print(len(y_train_col_len))
#%%
len(train_input)
len(train_output)
len(test_input)
#%%
# Raphael Model
# input
input_ = Input(shape=(30, 30, 1), name='train_input')
output_ = Input(shape=(30, 30, 1), name='train_output')
# convolution layers
x_1 = Conv2D(64, (3, 3), activation='relu')(input_)
x_1 = MaxPooling2D(pool_size=(2, 2))(x_1)
x_1 = Dropout(0.25)(x_1)
x_1 = Flatten()(x_1)
x_2 = Conv2D(64, (3, 3), activation='relu')(output_)
x_2 = MaxPooling2D(pool_size=(2, 2))(x_2)
x_2 = Dropout(0.25)(x_2)
x_2 = Flatten()(x_2)
merge = concatenate([x_1, x_2])
merge = Dense(128, activation='relu')(merge)
merge = Dense(128, activation='relu')(merge)
merge = Dropout(0.3)(merge)
# pretrain.mirror_tasks,
# pretrain.double_line_with_multiple_colors_tasks,
# regression layers
out_1 = Dense(128, activation='relu')(merge)
out_1 = Dense(1, activation='linear', name='fzn_rows')(out_1)
out_2 = Dense(128, activation='relu')(merge)
out_2 = Dense(1, activation='linear', name='fzn_cols')(out_2)
out_9 = Dense(128, activation='relu')(merge)
out_9 = Dense(1, activation='linear', name='fzn_removed_line_nr')(out_9)
out_11 = Dense(128, activation='relu')(merge)
out_11 = Dense(1, activation='linear', name='fzn_shifted_line_nr')(out_11)
# out_15 = Dense(128, activation='relu')(merge)
# out_15 = Dense(1, activation='linear', name='doubled_line_nr')(out_15)
# multi-label classification layers
out_4 = Dense(128, activation='relu')(merge)
out_4 = Dense(4, activation='sigmoid', name='fzn_rotation_angle')(out_4)
out_5 = Dense(128, activation='relu')(merge)
out_5 = Dense(3, activation='sigmoid', name='fzn_multiply_factor')(out_5)
out_6 = Dense(128, activation='relu')(merge)
out_6 = Dense(10, activation='sigmoid', name='fzn_changed_color_old')(out_6)
out_7 = Dense(128, activation='relu')(merge)
out_7 = Dense(10, activation='sigmoid', name='fzn_changed_color_new')(out_7)
out_8 = Dense(128, activation='relu')(merge)
out_8 = Dense(3, activation='sigmoid', name='fzn_removed_row_or_column')(out_8)
out_10 = Dense(128, activation='relu')(merge)
out_10 = Dense(3, activation='sigmoid', name='fzn_shifted_row_or_column')(out_10)
out_12 = Dense(128, activation='relu')(merge)
out_12 = Dense(3, activation='sigmoid', name='fzn_multiply_rotation_factor')(out_12)
out_13 = Dense(128, activation='relu')(merge)
out_13 = Dense(3, activation='sigmoid', name='fzn_multiply_mirror_factor')(out_13)
# out_14 = Dense(128, activation='relu')(merge)
# out_14 = Dense(3, activation='sigmoid', name='doubled_row_or_column')(out_14)
model = Model(inputs=[input_, output_], outputs=[
out_1, out_2, out_4,
out_5, out_6, out_7,
out_8, out_9, out_10,
out_11, out_12, out_13,
# out_14, out_15
])
opt = Adam(lr=1e-3, decay=1e-3)
losses = {
"fzn_rows": "mean_absolute_error",
"fzn_cols": "mean_absolute_error",
"fzn_removed_line_nr": "mean_absolute_error",
"fzn_shifted_line_nr": "mean_absolute_error",
"fzn_rotation_angle": "binary_crossentropy",
"fzn_multiply_factor": "binary_crossentropy",
"fzn_changed_color_old": "binary_crossentropy",
"fzn_changed_color_new": "binary_crossentropy",
"fzn_removed_row_or_column": "binary_crossentropy",
"fzn_shifted_row_or_column": "binary_crossentropy",
"fzn_multiply_rotation_factor": "binary_crossentropy",
"fzn_multiply_mirror_factor": "binary_crossentropy",
# "doubled_row_or_column": "binary_crossentropy",
# "doubled_line_nr": "mean_absolute_error"
}
model.compile(loss=losses, optimizer=opt)
#%%
#%%
# Train the model
start = time.time()
history = model.fit(
[
np.array(train_input),
np.array(train_output)
],
[
np.array(y_train_col_len),
np.array(y_train_row_len),
np.array(to_categorical(y_labels[0])),
np.array(to_categorical(y_labels[1])),
np.array(to_categorical(y_labels[2])),
np.array(to_categorical(y_labels[3])),
np.array(to_categorical(y_labels[4])),
np.array(y_labels[5]),
np.array(to_categorical(y_labels[6])),
np.array(y_labels[7]),
np.array(to_categorical(y_labels[8])),
np.array(to_categorical(y_labels[9])),
],
epochs=100)
print('training time {} minutes'.format(round((time.time()-start)/60)))
#%%
log = Logger('2_input_12_pretrain')
log.save_experiment(model, history)
#%%
plt.plot_loss(log, 'loss', save=True)
#%%
log = Logger('2_input_12_pretrain')
model = log.load_experiment()
model.summary()
#%%
"""
path_frozen = 'data/weights_frozen/{}'.format(log.name)
if not os.path.exists(path_frozen):
os.mkdir(path_frozen)
model.save(path_frozen)
"""
#%%
for layer in model.layers[:-12]:
layer.trainable = False
#model.layers.pop()
model.summary()
#%%
# predict whole task model
input_test_task = Input(shape=(30, 30, 1), name='test_input')
input_frozen_model = concatenate(model.output, name='concat_frozen_layers')
# convolution layers
x_test = Conv2D(64, (3, 3), activation='relu', name='test_convolution')(input_test_task)
x_test = MaxPooling2D(pool_size=(2, 2), name='test_pooling')(x_test)
x_test = Dropout(0.25, name='test_dropout')(x_test)
x_test = Flatten(name | empty_array = np.full((30, 30), 0, dtype=np.float32)
if(len(mat) != 0):
mat = np.asarray(mat, dtype=np.float32)
empty_array[:mat.shape[0], : mat.shape[1]] = mat
return np.expand_dims(empty_array, axis= 2) | identifier_body |
|
polyres.py | )
# No options means an error is going to happen later, but for now,
# just return some placeholders so that we can make it to the
# error later.
if not options:
return {k: _SINGLETON for k in set(range(num_args)) | kwargs_names}
fts: Dict[Union[int, str], ft.TypeModifier] = {}
for choice in options:
for barg in choice.args:
if not barg.param or barg.arg_id is None:
continue
ft = barg.param.get_typemod(ctx.env.schema)
if barg.arg_id in fts and fts[barg.arg_id] != ft:
if ft == _SET_OF or fts[barg.arg_id] == _SET_OF:
raise errors.QueryError(
f'argument could be SET OF or not in call to '
f'{candidates[0].get_verbosename(ctx.env.schema)}: '
f'seems like a stdlib bug!')
else:
# If there is a mix between OPTIONAL and SINGLETON
# arguments in possible call sites, we just call it
# optional. Generated code quality will be a little
# worse but still correct.
fts[barg.arg_id] = _OPTIONAL
else:
fts[barg.arg_id] = ft
return fts
def find_callable(
candidates: Iterable[s_func.CallableLike], *,
args: Sequence[Tuple[s_types.Type, irast.Set]],
kwargs: Mapping[str, Tuple[s_types.Type, irast.Set]],
basic_matching_only: bool=False,
ctx: context.ContextLevel) -> List[BoundCall]:
implicit_cast_distance = None
matched = []
candidates = list(candidates)
for candidate in candidates:
call = try_bind_call_args(
args, kwargs, candidate, basic_matching_only, ctx=ctx)
if call is None:
continue
total_cd = sum(barg.cast_distance for barg in call.args)
if implicit_cast_distance is None:
implicit_cast_distance = total_cd
matched.append(call)
elif implicit_cast_distance == total_cd:
matched.append(call)
elif implicit_cast_distance > total_cd:
implicit_cast_distance = total_cd
matched = [call]
if len(matched) <= 1:
# Unambiguous resolution
return matched
else:
# Ambiguous resolution, try to disambiguate by
# checking for total type distance.
type_dist = None
remaining = []
for call in matched:
call_type_dist = 0
for barg in call.args: | paramtype = barg.param.get_type(ctx.env.schema)
arg_type_dist = barg.valtype.get_common_parent_type_distance(
paramtype, ctx.env.schema)
call_type_dist += arg_type_dist
if type_dist is None:
type_dist = call_type_dist
remaining.append(call)
elif type_dist == call_type_dist:
remaining.append(call)
elif type_dist > call_type_dist:
type_dist = call_type_dist
remaining = [call]
return remaining
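# Editorial summary: overload resolution above is two-stage -- candidates are first
# ranked by total implicit-cast distance, and only ties are re-ranked by the distance
# of each argument type from its parameter type in the common-parent hierarchy.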
def try_bind_call_args(
args: Sequence[Tuple[s_types.Type, irast.Set]],
kwargs: Mapping[str, Tuple[s_types.Type, irast.Set]],
func: s_func.CallableLike,
basic_matching_only: bool,
*,
ctx: context.ContextLevel) -> Optional[BoundCall]:
return_type = func.get_return_type(ctx.env.schema)
is_abstract = func.get_abstract(ctx.env.schema)
resolved_poly_base_type: Optional[s_types.Type] = None
def _get_cast_distance(
arg: irast.Set,
arg_type: s_types.Type,
param_type: s_types.Type,
) -> int:
nonlocal resolved_poly_base_type
if basic_matching_only:
return 0
if in_polymorphic_func:
# Compiling a body of a polymorphic function.
if arg_type.is_polymorphic(schema):
if param_type.is_polymorphic(schema):
if arg_type.test_polymorphic(schema, param_type):
return 0
else:
return -1
else:
if arg_type.resolve_polymorphic(schema, param_type):
return 0
else:
return -1
if param_type.is_polymorphic(schema):
if not arg_type.test_polymorphic(schema, param_type):
return -1
resolved = param_type.resolve_polymorphic(schema, arg_type)
if resolved is None:
return -1
if resolved_poly_base_type is None:
resolved_poly_base_type = resolved
if resolved_poly_base_type == resolved:
if is_abstract:
return s_types.MAX_TYPE_DISTANCE
elif arg_type.is_range() and param_type.is_multirange():
# Ranges are implicitly cast into multiranges of the same
# type, so they are compatible as far as polymorphic
# resolution goes, but it's still 1 cast.
return 1
else:
return 0
ctx.env.schema, ct = (
resolved_poly_base_type.find_common_implicitly_castable_type(
resolved,
ctx.env.schema,
)
)
if ct is not None:
# If we found a common implicitly castable type, we
# refine our resolved_poly_base_type to be that as the
# more general case.
resolved_poly_base_type = ct
return s_types.MAX_TYPE_DISTANCE if is_abstract else 0
else:
return -1
if arg_type.issubclass(schema, param_type):
return 0
return arg_type.get_implicit_cast_distance(param_type, schema)
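    # Editorial note: _get_cast_distance returns -1 for "incompatible", 0 for an exact
    # or directly assignable match, and larger positive values for implicit casts;
    # find_callable sums these per candidate to prefer the cheapest overload.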
schema = ctx.env.schema
in_polymorphic_func = (
ctx.env.options.func_params is not None and
ctx.env.options.func_params.has_polymorphic(schema)
)
has_empty_variadic = False
no_args_call = not args and not kwargs
has_inlined_defaults = func.has_inlined_defaults(schema)
func_params = func.get_params(schema)
if not func_params:
if no_args_call:
# Match: `func` is a function without parameters
# being called with no arguments.
bargs: List[BoundArg] = []
if has_inlined_defaults:
bytes_t = ctx.env.get_schema_type_and_track(
sn.QualName('std', 'bytes'))
typeref = typegen.type_to_typeref(bytes_t, env=ctx.env)
argval = setgen.ensure_set(
irast.BytesConstant(value=b'\x00', typeref=typeref),
typehint=bytes_t,
ctx=ctx)
bargs = [BoundArg(None, bytes_t, argval, bytes_t, 0, -1)]
return BoundCall(
func, bargs, set(),
return_type, False)
else:
# No match: `func` is a function without parameters
# being called with some arguments.
return None
named_only = func_params.find_named_only(schema)
if no_args_call and func_params.has_required_params(schema):
# A call without arguments and there is at least
# one parameter without default.
return None
bound_args_prep: List[Union[MissingArg, BoundArg]] = []
params = func_params.get_in_canonical_order(schema)
nparams = len(params)
nargs = len(args)
has_missing_args = False
ai = 0
pi = 0
matched_kwargs = 0
# Bind NAMED ONLY arguments (they are compiled as the first set of arguments).
while True:
if pi >= nparams:
break
param = params[pi]
if param.get_kind(schema) is not _NAMED_ONLY:
break
pi += 1
param_shortname = param.get_parameter_name(schema)
param_type = param.get_type(schema)
if param_shortname in kwargs:
matched_kwargs += 1
arg_type, arg_val = kwargs[param_shortname]
cd = _get_cast_distance(arg_val, arg_type, param_type)
if cd < 0:
return None
bound_args_prep.append(
BoundArg(param, param_type, arg_val, arg_type, cd,
param_shortname))
else:
if param.get_default(schema) is None:
# required named parameter without default and
# without a matching argument
return None
has_missing_args = True
bound_args_prep.append(MissingArg(param, param_type))
if matched_kwargs != len(kwargs):
# extra kwargs?
return None
# Bind POSITIONAL arguments (compiled to go after NAMED ONLY arguments).
while True:
if ai < nargs:
arg_type, arg_val = args[ai]
ai += 1
if pi >= nparams:
# too many positional arguments
return None
param = params[pi]
param_type = param.get_type(schema)
param_kind = param.get_kind(schema)
pi += 1
if param_kind is _NAMED_ONLY:
# impossible condition
raise RuntimeError('unprocessed NAMED ONLY parameter')
if param_kind is _VARIADIC:
param_type = cast(s_types.Array, param_type)
var_type = param_type.get_subtypes(schema)[0]
cd = _get_cast_distance(arg_val, arg_type, var_type)
if cd < 0:
return None
| if barg.param is None:
# Skip injected bitmask argument.
continue
| random_line_split |
polyres.py | )
# No options means an error is going to happen later, but for now,
# just return some placeholders so that we can make it to the
# error later.
if not options:
return {k: _SINGLETON for k in set(range(num_args)) | kwargs_names}
fts: Dict[Union[int, str], ft.TypeModifier] = {}
for choice in options:
for barg in choice.args:
if not barg.param or barg.arg_id is None:
continue
ft = barg.param.get_typemod(ctx.env.schema)
if barg.arg_id in fts and fts[barg.arg_id] != ft:
if ft == _SET_OF or fts[barg.arg_id] == _SET_OF:
raise errors.QueryError(
f'argument could be SET OF or not in call to '
f'{candidates[0].get_verbosename(ctx.env.schema)}: '
f'seems like a stdlib bug!')
else:
# If there is a mix between OPTIONAL and SINGLETON
# arguments in possible call sites, we just call it
# optional. Generated code quality will be a little
# worse but still correct.
fts[barg.arg_id] = _OPTIONAL
else:
fts[barg.arg_id] = ft
return fts
def find_callable(
candidates: Iterable[s_func.CallableLike], *,
args: Sequence[Tuple[s_types.Type, irast.Set]],
kwargs: Mapping[str, Tuple[s_types.Type, irast.Set]],
basic_matching_only: bool=False,
ctx: context.ContextLevel) -> List[BoundCall]:
implicit_cast_distance = None
matched = []
candidates = list(candidates)
for candidate in candidates:
call = try_bind_call_args(
args, kwargs, candidate, basic_matching_only, ctx=ctx)
if call is None:
continue
total_cd = sum(barg.cast_distance for barg in call.args)
if implicit_cast_distance is None:
implicit_cast_distance = total_cd
matched.append(call)
elif implicit_cast_distance == total_cd:
matched.append(call)
elif implicit_cast_distance > total_cd:
implicit_cast_distance = total_cd
matched = [call]
if len(matched) <= 1:
# Unambiguous resolution
return matched
else:
# Ambiguous resolution, try to disambiguate by
# checking for total type distance.
type_dist = None
remaining = []
for call in matched:
call_type_dist = 0
for barg in call.args:
if barg.param is None:
# Skip injected bitmask argument.
continue
paramtype = barg.param.get_type(ctx.env.schema)
arg_type_dist = barg.valtype.get_common_parent_type_distance(
paramtype, ctx.env.schema)
call_type_dist += arg_type_dist
if type_dist is None:
type_dist = call_type_dist
remaining.append(call)
elif type_dist == call_type_dist:
remaining.append(call)
elif type_dist > call_type_dist:
type_dist = call_type_dist
remaining = [call]
return remaining
def try_bind_call_args(
args: Sequence[Tuple[s_types.Type, irast.Set]],
kwargs: Mapping[str, Tuple[s_types.Type, irast.Set]],
func: s_func.CallableLike,
basic_matching_only: bool,
*,
ctx: context.ContextLevel) -> Optional[BoundCall]:
return_type = func.get_return_type(ctx.env.schema)
is_abstract = func.get_abstract(ctx.env.schema)
resolved_poly_base_type: Optional[s_types.Type] = None
def _get_cast_distance(
arg: irast.Set,
arg_type: s_types.Type,
param_type: s_types.Type,
) -> int:
nonlocal resolved_poly_base_type
if basic_matching_only:
return 0
if in_polymorphic_func:
# Compiling a body of a polymorphic function.
if arg_type.is_polymorphic(schema):
if param_type.is_polymorphic(schema):
if arg_type.test_polymorphic(schema, param_type):
return 0
else:
return -1
else:
if arg_type.resolve_polymorphic(schema, param_type):
return 0
else:
return -1
if param_type.is_polymorphic(schema):
if not arg_type.test_polymorphic(schema, param_type):
return -1
resolved = param_type.resolve_polymorphic(schema, arg_type)
if resolved is None:
return -1
if resolved_poly_base_type is None:
resolved_poly_base_type = resolved
if resolved_poly_base_type == resolved:
if is_abstract:
return s_types.MAX_TYPE_DISTANCE
elif arg_type.is_range() and param_type.is_multirange():
# Ranges are implicitly cast into multiranges of the same
# type, so they are compatible as far as polymorphic
# resolution goes, but it's still 1 cast.
return 1
else:
|
ctx.env.schema, ct = (
resolved_poly_base_type.find_common_implicitly_castable_type(
resolved,
ctx.env.schema,
)
)
if ct is not None:
# If we found a common implicitly castable type, we
# refine our resolved_poly_base_type to be that as the
# more general case.
resolved_poly_base_type = ct
return s_types.MAX_TYPE_DISTANCE if is_abstract else 0
else:
return -1
if arg_type.issubclass(schema, param_type):
return 0
return arg_type.get_implicit_cast_distance(param_type, schema)
schema = ctx.env.schema
in_polymorphic_func = (
ctx.env.options.func_params is not None and
ctx.env.options.func_params.has_polymorphic(schema)
)
has_empty_variadic = False
no_args_call = not args and not kwargs
has_inlined_defaults = func.has_inlined_defaults(schema)
func_params = func.get_params(schema)
if not func_params:
if no_args_call:
# Match: `func` is a function without parameters
# being called with no arguments.
bargs: List[BoundArg] = []
if has_inlined_defaults:
bytes_t = ctx.env.get_schema_type_and_track(
sn.QualName('std', 'bytes'))
typeref = typegen.type_to_typeref(bytes_t, env=ctx.env)
argval = setgen.ensure_set(
irast.BytesConstant(value=b'\x00', typeref=typeref),
typehint=bytes_t,
ctx=ctx)
bargs = [BoundArg(None, bytes_t, argval, bytes_t, 0, -1)]
return BoundCall(
func, bargs, set(),
return_type, False)
else:
# No match: `func` is a function without parameters
# being called with some arguments.
return None
named_only = func_params.find_named_only(schema)
if no_args_call and func_params.has_required_params(schema):
# A call without arguments and there is at least
# one parameter without default.
return None
bound_args_prep: List[Union[MissingArg, BoundArg]] = []
params = func_params.get_in_canonical_order(schema)
nparams = len(params)
nargs = len(args)
has_missing_args = False
ai = 0
pi = 0
matched_kwargs = 0
# Bind NAMED ONLY arguments (they are compiled as the first set of arguments).
while True:
if pi >= nparams:
break
param = params[pi]
if param.get_kind(schema) is not _NAMED_ONLY:
break
pi += 1
param_shortname = param.get_parameter_name(schema)
param_type = param.get_type(schema)
if param_shortname in kwargs:
matched_kwargs += 1
arg_type, arg_val = kwargs[param_shortname]
cd = _get_cast_distance(arg_val, arg_type, param_type)
if cd < 0:
return None
bound_args_prep.append(
BoundArg(param, param_type, arg_val, arg_type, cd,
param_shortname))
else:
if param.get_default(schema) is None:
# required named parameter without default and
# without a matching argument
return None
has_missing_args = True
bound_args_prep.append(MissingArg(param, param_type))
if matched_kwargs != len(kwargs):
# extra kwargs?
return None
# Bind POSITIONAL arguments (compiled to go after NAMED ONLY arguments).
while True:
if ai < nargs:
arg_type, arg_val = args[ai]
ai += 1
if pi >= nparams:
# too many positional arguments
return None
param = params[pi]
param_type = param.get_type(schema)
param_kind = param.get_kind(schema)
pi += 1
if param_kind is _NAMED_ONLY:
# impossible condition
raise RuntimeError('unprocessed NAMED ONLY parameter')
if param_kind is _VARIADIC:
param_type = cast(s_types.Array, param_type)
var_type = param_type.get_subtypes(schema)[0]
cd = _get_cast_distance(arg_val, arg_type, var_type)
if cd < 0:
return None
| return 0 | conditional_block |
polyres.py | )
# No options means an error is going to happen later, but for now,
# just return some placeholders so that we can make it to the
# error later.
if not options:
return {k: _SINGLETON for k in set(range(num_args)) | kwargs_names}
fts: Dict[Union[int, str], ft.TypeModifier] = {}
for choice in options:
for barg in choice.args:
if not barg.param or barg.arg_id is None:
continue
ft = barg.param.get_typemod(ctx.env.schema)
if barg.arg_id in fts and fts[barg.arg_id] != ft:
if ft == _SET_OF or fts[barg.arg_id] == _SET_OF:
raise errors.QueryError(
f'argument could be SET OF or not in call to '
f'{candidates[0].get_verbosename(ctx.env.schema)}: '
f'seems like a stdlib bug!')
else:
# If there is a mix between OPTIONAL and SINGLETON
# arguments in possible call sites, we just call it
# optional. Generated code quality will be a little
# worse but still correct.
fts[barg.arg_id] = _OPTIONAL
else:
fts[barg.arg_id] = ft
return fts
def find_callable(
candidates: Iterable[s_func.CallableLike], *,
args: Sequence[Tuple[s_types.Type, irast.Set]],
kwargs: Mapping[str, Tuple[s_types.Type, irast.Set]],
basic_matching_only: bool=False,
ctx: context.ContextLevel) -> List[BoundCall]:
| matched = [call]
if len(matched) <= 1:
# Unambiguous resolution
return matched
else:
# Ambiguous resolution, try to disambiguate by
# checking for total type distance.
type_dist = None
remaining = []
for call in matched:
call_type_dist = 0
for barg in call.args:
if barg.param is None:
# Skip injected bitmask argument.
continue
paramtype = barg.param.get_type(ctx.env.schema)
arg_type_dist = barg.valtype.get_common_parent_type_distance(
paramtype, ctx.env.schema)
call_type_dist += arg_type_dist
if type_dist is None:
type_dist = call_type_dist
remaining.append(call)
elif type_dist == call_type_dist:
remaining.append(call)
elif type_dist > call_type_dist:
type_dist = call_type_dist
remaining = [call]
return remaining
def try_bind_call_args(
args: Sequence[Tuple[s_types.Type, irast.Set]],
kwargs: Mapping[str, Tuple[s_types.Type, irast.Set]],
func: s_func.CallableLike,
basic_matching_only: bool,
*,
ctx: context.ContextLevel) -> Optional[BoundCall]:
return_type = func.get_return_type(ctx.env.schema)
is_abstract = func.get_abstract(ctx.env.schema)
resolved_poly_base_type: Optional[s_types.Type] = None
def _get_cast_distance(
arg: irast.Set,
arg_type: s_types.Type,
param_type: s_types.Type,
) -> int:
nonlocal resolved_poly_base_type
if basic_matching_only:
return 0
if in_polymorphic_func:
# Compiling a body of a polymorphic function.
if arg_type.is_polymorphic(schema):
if param_type.is_polymorphic(schema):
if arg_type.test_polymorphic(schema, param_type):
return 0
else:
return -1
else:
if arg_type.resolve_polymorphic(schema, param_type):
return 0
else:
return -1
if param_type.is_polymorphic(schema):
if not arg_type.test_polymorphic(schema, param_type):
return -1
resolved = param_type.resolve_polymorphic(schema, arg_type)
if resolved is None:
return -1
if resolved_poly_base_type is None:
resolved_poly_base_type = resolved
if resolved_poly_base_type == resolved:
if is_abstract:
return s_types.MAX_TYPE_DISTANCE
elif arg_type.is_range() and param_type.is_multirange():
# Ranges are implicitly cast into multiranges of the same
# type, so they are compatible as far as polymorphic
# resolution goes, but it's still 1 cast.
return 1
else:
return 0
ctx.env.schema, ct = (
resolved_poly_base_type.find_common_implicitly_castable_type(
resolved,
ctx.env.schema,
)
)
if ct is not None:
# If we found a common implicitly castable type, we
# refine our resolved_poly_base_type to be that as the
# more general case.
resolved_poly_base_type = ct
return s_types.MAX_TYPE_DISTANCE if is_abstract else 0
else:
return -1
if arg_type.issubclass(schema, param_type):
return 0
return arg_type.get_implicit_cast_distance(param_type, schema)
schema = ctx.env.schema
in_polymorphic_func = (
ctx.env.options.func_params is not None and
ctx.env.options.func_params.has_polymorphic(schema)
)
has_empty_variadic = False
no_args_call = not args and not kwargs
has_inlined_defaults = func.has_inlined_defaults(schema)
func_params = func.get_params(schema)
if not func_params:
if no_args_call:
# Match: `func` is a function without parameters
# being called with no arguments.
bargs: List[BoundArg] = []
if has_inlined_defaults:
bytes_t = ctx.env.get_schema_type_and_track(
sn.QualName('std', 'bytes'))
typeref = typegen.type_to_typeref(bytes_t, env=ctx.env)
argval = setgen.ensure_set(
irast.BytesConstant(value=b'\x00', typeref=typeref),
typehint=bytes_t,
ctx=ctx)
bargs = [BoundArg(None, bytes_t, argval, bytes_t, 0, -1)]
return BoundCall(
func, bargs, set(),
return_type, False)
else:
# No match: `func` is a function without parameters
# being called with some arguments.
return None
named_only = func_params.find_named_only(schema)
if no_args_call and func_params.has_required_params(schema):
# A call without arguments and there is at least
# one parameter without default.
return None
bound_args_prep: List[Union[MissingArg, BoundArg]] = []
params = func_params.get_in_canonical_order(schema)
nparams = len(params)
nargs = len(args)
has_missing_args = False
ai = 0
pi = 0
matched_kwargs = 0
# Bind NAMED ONLY arguments (they are compiled as the first set of arguments).
while True:
if pi >= nparams:
break
param = params[pi]
if param.get_kind(schema) is not _NAMED_ONLY:
break
pi += 1
param_shortname = param.get_parameter_name(schema)
param_type = param.get_type(schema)
if param_shortname in kwargs:
matched_kwargs += 1
arg_type, arg_val = kwargs[param_shortname]
cd = _get_cast_distance(arg_val, arg_type, param_type)
if cd < 0:
return None
bound_args_prep.append(
BoundArg(param, param_type, arg_val, arg_type, cd,
param_shortname))
else:
if param.get_default(schema) is None:
# required named parameter without default and
# without a matching argument
return None
has_missing_args = True
bound_args_prep.append(MissingArg(param, param_type))
if matched_kwargs != len(kwargs):
# extra kwargs?
return None
# Bind POSITIONAL arguments (compiled to go after NAMED ONLY arguments).
while True:
if ai < nargs:
arg_type, arg_val = args[ai]
ai += 1
if pi >= nparams:
# too many positional arguments
return None
param = params[pi]
param_type = param.get_type(schema)
param_kind = param.get_kind(schema)
pi += 1
if param_kind is _NAMED_ONLY:
# impossible condition
raise RuntimeError('unprocessed NAMED ONLY parameter')
if param_kind is _VARIADIC:
param_type = cast(s_types.Array, param_type)
var_type = param_type.get_subtypes(schema)[0]
cd = _get_cast_distance(arg_val, arg_type, var_type)
if cd < 0:
return None
| implicit_cast_distance = None
matched = []
candidates = list(candidates)
for candidate in candidates:
call = try_bind_call_args(
args, kwargs, candidate, basic_matching_only, ctx=ctx)
if call is None:
continue
total_cd = sum(barg.cast_distance for barg in call.args)
if implicit_cast_distance is None:
implicit_cast_distance = total_cd
matched.append(call)
elif implicit_cast_distance == total_cd:
matched.append(call)
elif implicit_cast_distance > total_cd:
implicit_cast_distance = total_cd | identifier_body |
polyres.py | (NamedTuple):
func: s_func.CallableLike
args: List[BoundArg]
null_args: Set[str]
return_type: s_types.Type
has_empty_variadic: bool
_VARIADIC = ft.ParameterKind.VariadicParam
_NAMED_ONLY = ft.ParameterKind.NamedOnlyParam
_POSITIONAL = ft.ParameterKind.PositionalParam
_SET_OF = ft.TypeModifier.SetOfType
_OPTIONAL = ft.TypeModifier.OptionalType
_SINGLETON = ft.TypeModifier.SingletonType
def find_callable_typemods(
candidates: Sequence[s_func.CallableLike], *,
num_args: int,
kwargs_names: AbstractSet[str],
ctx: context.ContextLevel) -> Dict[Union[int, str], ft.TypeModifier]:
"""Find the type modifiers for a callable.
We do this early, before we've compiled/checked the arguments,
so that we can compile the arguments with the proper fences.
"""
typ = s_pseudo.PseudoType.get(ctx.env.schema, 'anytype')
dummy = irast.EmptySet() # type: ignore
args = [(typ, dummy)] * num_args
kwargs = {k: (typ, dummy) for k in kwargs_names}
options = find_callable(
candidates, basic_matching_only=True, args=args, kwargs=kwargs, ctx=ctx
)
# No options means an error is going to happen later, but for now,
# just return some placeholders so that we can make it to the
# error later.
if not options:
return {k: _SINGLETON for k in set(range(num_args)) | kwargs_names}
fts: Dict[Union[int, str], ft.TypeModifier] = {}
for choice in options:
for barg in choice.args:
if not barg.param or barg.arg_id is None:
continue
ft = barg.param.get_typemod(ctx.env.schema)
if barg.arg_id in fts and fts[barg.arg_id] != ft:
if ft == _SET_OF or fts[barg.arg_id] == _SET_OF:
raise errors.QueryError(
f'argument could be SET OF or not in call to '
f'{candidates[0].get_verbosename(ctx.env.schema)}: '
f'seems like a stdlib bug!')
else:
# If there is a mix between OPTIONAL and SINGLETON
# arguments in possible call sites, we just call it
# optional. Generated code quality will be a little
# worse but still correct.
fts[barg.arg_id] = _OPTIONAL
else:
fts[barg.arg_id] = ft
return fts
def find_callable(
candidates: Iterable[s_func.CallableLike], *,
args: Sequence[Tuple[s_types.Type, irast.Set]],
kwargs: Mapping[str, Tuple[s_types.Type, irast.Set]],
basic_matching_only: bool=False,
ctx: context.ContextLevel) -> List[BoundCall]:
implicit_cast_distance = None
matched = []
candidates = list(candidates)
for candidate in candidates:
call = try_bind_call_args(
args, kwargs, candidate, basic_matching_only, ctx=ctx)
if call is None:
continue
total_cd = sum(barg.cast_distance for barg in call.args)
if implicit_cast_distance is None:
implicit_cast_distance = total_cd
matched.append(call)
elif implicit_cast_distance == total_cd:
matched.append(call)
elif implicit_cast_distance > total_cd:
implicit_cast_distance = total_cd
matched = [call]
if len(matched) <= 1:
# Unambiguous resolution
return matched
else:
# Ambiguous resolution, try to disambiguate by
# checking for total type distance.
type_dist = None
remaining = []
for call in matched:
call_type_dist = 0
for barg in call.args:
if barg.param is None:
# Skip injected bitmask argument.
continue
paramtype = barg.param.get_type(ctx.env.schema)
arg_type_dist = barg.valtype.get_common_parent_type_distance(
paramtype, ctx.env.schema)
call_type_dist += arg_type_dist
if type_dist is None:
type_dist = call_type_dist
remaining.append(call)
elif type_dist == call_type_dist:
remaining.append(call)
elif type_dist > call_type_dist:
type_dist = call_type_dist
remaining = [call]
return remaining
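# Illustrative note (not part of the original file): the selection above is a
# two-stage tie-break. The first loop keeps only the candidates whose bound
# arguments need the smallest total implicit-cast distance; if several
# candidates still tie, the second stage compares the summed
# get_common_parent_type_distance() of each argument type to its declared
# parameter type and keeps only the candidates with the smallest total.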
def try_bind_call_args(
args: Sequence[Tuple[s_types.Type, irast.Set]],
kwargs: Mapping[str, Tuple[s_types.Type, irast.Set]],
func: s_func.CallableLike,
basic_matching_only: bool,
*,
ctx: context.ContextLevel) -> Optional[BoundCall]:
return_type = func.get_return_type(ctx.env.schema)
is_abstract = func.get_abstract(ctx.env.schema)
resolved_poly_base_type: Optional[s_types.Type] = None
def _get_cast_distance(
arg: irast.Set,
arg_type: s_types.Type,
param_type: s_types.Type,
) -> int:
nonlocal resolved_poly_base_type
if basic_matching_only:
return 0
if in_polymorphic_func:
# Compiling a body of a polymorphic function.
if arg_type.is_polymorphic(schema):
if param_type.is_polymorphic(schema):
if arg_type.test_polymorphic(schema, param_type):
return 0
else:
return -1
else:
if arg_type.resolve_polymorphic(schema, param_type):
return 0
else:
return -1
if param_type.is_polymorphic(schema):
if not arg_type.test_polymorphic(schema, param_type):
return -1
resolved = param_type.resolve_polymorphic(schema, arg_type)
if resolved is None:
return -1
if resolved_poly_base_type is None:
resolved_poly_base_type = resolved
if resolved_poly_base_type == resolved:
if is_abstract:
return s_types.MAX_TYPE_DISTANCE
elif arg_type.is_range() and param_type.is_multirange():
# Ranges are implicitly cast into multiranges of the same
# type, so they are compatible as far as polymorphic
# resolution goes, but it's still 1 cast.
return 1
else:
return 0
ctx.env.schema, ct = (
resolved_poly_base_type.find_common_implicitly_castable_type(
resolved,
ctx.env.schema,
)
)
if ct is not None:
# If we found a common implicitly castable type, we
# refine our resolved_poly_base_type to be that as the
# more general case.
resolved_poly_base_type = ct
return s_types.MAX_TYPE_DISTANCE if is_abstract else 0
else:
return -1
if arg_type.issubclass(schema, param_type):
return 0
return arg_type.get_implicit_cast_distance(param_type, schema)
schema = ctx.env.schema
in_polymorphic_func = (
ctx.env.options.func_params is not None and
ctx.env.options.func_params.has_polymorphic(schema)
)
has_empty_variadic = False
no_args_call = not args and not kwargs
has_inlined_defaults = func.has_inlined_defaults(schema)
func_params = func.get_params(schema)
if not func_params:
if no_args_call:
# Match: `func` is a function without parameters
# being called with no arguments.
bargs: List[BoundArg] = []
if has_inlined_defaults:
bytes_t = ctx.env.get_schema_type_and_track(
sn.QualName('std', 'bytes'))
typeref = typegen.type_to_typeref(bytes_t, env=ctx.env)
argval = setgen.ensure_set(
irast.BytesConstant(value=b'\x00', typeref=typeref),
typehint=bytes_t,
ctx=ctx)
bargs = [BoundArg(None, bytes_t, argval, bytes_t, 0, -1)]
return BoundCall(
func, bargs, set(),
return_type, False)
else:
# No match: `func` is a function without parameters
# being called with some arguments.
return None
named_only = func_params.find_named_only(schema)
if no_args_call and func_params.has_required_params(schema):
# A call without arguments and there is at least
# one parameter without default.
return None
bound_args_prep: List[Union[MissingArg, BoundArg]] = []
params = func_params.get_in_canonical_order(schema)
nparams = len(params)
nargs = len(args)
has_missing_args = False
ai = 0
pi = 0
matched_kwargs = 0
# Bind NAMED ONLY arguments (they are compiled as the first set of arguments).
while True:
if pi >= nparams:
break
param = params[pi]
if param.get_kind(schema) is not _NAMED_ONLY:
break
pi += 1
param_shortname = param.get_parameter_name(schema)
param_type = param.get_type(schema)
if param_shortname in kwargs:
matched_kwargs += 1
arg_type, arg_val = kwargs[param_shortname]
cd = _get_cast_distance(arg_val, arg_type, param_type)
if cd < | BoundCall | identifier_name |
|
httpretty.go | on any *http.Client
//
// if _, err := http.Get("https://www.google.com/"); err != nil {
// fmt.Fprintf(os.Stderr, "%+v\n", err)
// os.Exit(1)
// }
// }
//
// If you pass nil to the logger.RoundTripper it is going to fallback to http.DefaultTransport.
//
// You can use the logger quickly to log requests on your server. For example:
// logger := &httpretty.Logger{
// Time: true,
// TLS: true,
// RequestHeader: true,
// RequestBody: true,
// ResponseHeader: true,
// ResponseBody: true,
// }
//
// logger.Middleware(handler)
//
// Note: server logs don't include response headers set by the server.
// Client logs don't include request headers set by the HTTP client.
package httpretty
import (
"bytes"
"context"
"crypto/tls"
"encoding/json"
"errors"
"io"
"net/http"
"net/textproto"
"os"
"sync"
"github.com/henvic/httpretty/internal/color"
)
// Formatter can be used to format body.
//
// If the Format function returns an error, the content is printed verbatim after a warning.
// Match receives a media type from the Content-Type field. The body is formatted if it returns true.
type Formatter interface {
Match(mediatype string) bool
Format(w io.Writer, src []byte) error
}
// WithHide can be used to protect a request from being exposed.
func WithHide(ctx context.Context) context.Context {
return context.WithValue(ctx, contextHide{}, struct{}{})
}
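// Illustrative sketch (not part of the original file): a request whose context
// is marked with WithHide is skipped by both the RoundTripper and the
// Middleware. The URL below is a placeholder.
func exampleHiddenRequest(client *http.Client) (*http.Response, error) {
	req, err := http.NewRequest(http.MethodGet, "https://example.com/", nil)
	if err != nil {
		return nil, err
	}
	return client.Do(req.WithContext(WithHide(req.Context())))
}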
// Logger provides a way for you to print client and server-side information about your HTTP traffic.
type Logger struct {
// SkipRequestInfo avoids printing a line showing the request URI on all requests plus a line
// containing the remote address on server-side requests.
SkipRequestInfo bool
// Time the request began and its duration.
Time bool
// TLS information, such as certificates and ciphers.
// BUG(henvic): Currently, the TLS information prints after the response header, although it
// should be printed before the request header.
TLS bool
// RequestHeader set by the client or received from the server.
RequestHeader bool
// RequestBody sent by the client or received by the server.
RequestBody bool
// ResponseHeader received by the client or set by the HTTP handlers.
ResponseHeader bool
// ResponseBody received by the client or set by the server.
ResponseBody bool
// SkipSanitize bypasses sanitizing headers containing credentials (such as Authorization).
SkipSanitize bool
// Colors set ANSI escape codes that terminals use to print text in different colors.
Colors bool
// Formatters for the request and response bodies.
// No standard formatters are used. You need to add what you want to use explicitly.
// We provide a JSONFormatter for convenience (add it manually).
Formatters []Formatter
// MaxRequestBody the logger can print.
// If value is not set and Content-Length is not sent, 4096 bytes is considered.
MaxRequestBody int64
// MaxResponseBody the logger can print.
// If value is not set and Content-Length is not sent, 4096 bytes is considered.
MaxResponseBody int64
mu sync.Mutex // ensures atomic writes; protects the following fields
w io.Writer
filter Filter
skipHeader map[string]struct{}
bodyFilter BodyFilter
flusher Flusher
}
// Filter allows you to skip requests.
//
// If an error happens and you want to log it, you can pass a non-nil error value.
type Filter func(req *http.Request) (skip bool, err error)
// BodyFilter allows you to skip printing a HTTP body based on its associated Header.
//
// It can be used for omitting HTTP Request and Response bodies.
// You can filter by checking properties such as Content-Type or Content-Length.
//
// On a HTTP server, this function is called even when no body is present due to
// http.Request always carrying a non-nil value.
type BodyFilter func(h http.Header) (skip bool, err error)
// Flusher defines how logger prints requests.
type Flusher int
// A Logger can print immediately without buffering, print each part of a request or response as soon as it is ready, or print everything only when the request is done.
const (
// NoBuffer strategy prints anything immediately, without buffering.
// It has the issue of mingling concurrent requests in unpredictable ways.
NoBuffer Flusher = iota
// OnReady buffers and prints each step of the request or response (header, body) whenever they are ready.
// It reduces the mingling caused by concurrent requests but does not give any ordering guarantee, so responses can still be out of order.
OnReady
// OnEnd buffers the whole request and flushes it once, in the end.
OnEnd
)
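// Illustrative sketch (not part of the original file): one way to wire a Logger
// into an *http.Client using only identifiers defined in this package. The
// field values below are example choices, not recommendations.
func exampleClientSetup() *http.Client {
	logger := &Logger{
		Time:           true,
		TLS:            true,
		RequestHeader:  true,
		RequestBody:    true,
		ResponseHeader: true,
		ResponseBody:   true,
	}
	logger.SetOutput(os.Stdout) // os.Stdout is already the default output
	logger.SetFlusher(OnEnd)    // buffer each request and flush it once, at the end
	return &http.Client{
		Transport: logger.RoundTripper(http.DefaultTransport),
	}
}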
// SetFilter allows you to set a function to skip requests.
// Pass nil to remove the filter. This method is concurrency safe.
func (l *Logger) SetFilter(f Filter) {
l.mu.Lock()
defer l.mu.Unlock()
l.filter = f
}
// SkipHeader allows you to skip printing specific headers.
// This method is concurrency safe.
func (l *Logger) SkipHeader(headers []string) {
l.mu.Lock()
defer l.mu.Unlock()
m := map[string]struct{}{}
for _, h := range headers {
m[textproto.CanonicalMIMEHeaderKey(h)] = struct{}{}
}
l.skipHeader = m
}
// SetBodyFilter allows you to set a function to skip printing a body.
// Pass nil to remove the body filter. This method is concurrency safe.
func (l *Logger) SetBodyFilter(f BodyFilter) {
l.mu.Lock()
defer l.mu.Unlock()
l.bodyFilter = f
}
// SetOutput sets the output destination for the logger.
func (l *Logger) SetOutput(w io.Writer) {
l.mu.Lock()
defer l.mu.Unlock()
l.w = w
}
// SetFlusher sets the flush strategy for the logger.
func (l *Logger) SetFlusher(f Flusher) {
l.mu.Lock()
defer l.mu.Unlock()
l.flusher = f
}
func (l *Logger) getWriter() io.Writer {
if l.w == nil {
return os.Stdout
}
return l.w
}
func (l *Logger) | () Filter {
l.mu.Lock()
f := l.filter
defer l.mu.Unlock()
return f
}
func (l *Logger) getBodyFilter() BodyFilter {
l.mu.Lock()
f := l.bodyFilter
defer l.mu.Unlock()
return f
}
func (l *Logger) cloneSkipHeader() map[string]struct{} {
l.mu.Lock()
skipped := l.skipHeader
l.mu.Unlock()
m := map[string]struct{}{}
for h := range skipped {
m[h] = struct{}{}
}
return m
}
type contextHide struct{}
type roundTripper struct {
logger *Logger
rt http.RoundTripper
}
// RoundTripper returns a RoundTripper that uses the logger.
func (l *Logger) RoundTripper(rt http.RoundTripper) http.RoundTripper {
return roundTripper{
logger: l,
rt: rt,
}
}
// RoundTrip implements the http.RoundTripper interface.
func (r roundTripper) RoundTrip(req *http.Request) (resp *http.Response, err error) {
tripper := r.rt
if tripper == nil {
// BUG(henvic): net/http data race condition when the client
// does concurrent requests using the very same HTTP transport.
// See Go standard library issue https://golang.org/issue/30597
tripper = http.RoundTripper(http.DefaultTransport)
}
l := r.logger
p := newPrinter(l)
defer p.flush()
if hide := req.Context().Value(contextHide{}); hide != nil || p.checkFilter(req) {
return tripper.RoundTrip(req)
}
var tlsClientConfig *tls.Config
if l.Time {
defer p.printTimeRequest()()
}
if !l.SkipRequestInfo {
p.printRequestInfo(req)
}
if transport, ok := tripper.(*http.Transport); ok && transport.TLSClientConfig != nil {
tlsClientConfig = transport.TLSClientConfig
if tlsClientConfig.InsecureSkipVerify {
p.printf("* Skipping TLS verification: %s\n",
p.format(color.FgRed, "connection is susceptible to man-in-the-middle attacks."))
}
}
if l.TLS && tlsClientConfig != nil {
// please remember http.Request.TLS is ignored by the HTTP client.
p.printOutgoingClientTLS(tlsClientConfig)
}
p.printRequest(req)
defer func() {
if err != nil {
p.printf("* %s\n", p.format(color.FgRed, err.Error()))
if resp == nil {
return
}
}
if l.TLS {
p.printTLSInfo(resp.TLS, false)
p.printTLSServer(req.Host, resp.TLS)
}
p.printResponse(resp)
}()
return tripper.RoundTrip(req)
}
// Middleware for logging incoming requests to a HTTP server.
func (l *Logger) Middleware(next http.Handler) http.Handler {
return httpHandler{
logger: l,
next: next,
}
}
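// Illustrative sketch (not part of the original file): wrapping a handler with
// the middleware on the server side. The handler body and address are made up.
func exampleServerSetup(logger *Logger) error {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte("hello"))
	})
	// Every request that reaches the wrapped handler is logged according to
	// the Logger's settings (response headers set by the server are not
	// included, as noted in the package documentation).
	return http.ListenAndServe(":8080", logger.Middleware(mux))
}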
type httpHandler struct {
logger *Logger
next http.Handler
}
// ServeHTTP is a middleware for logging incoming requests to a HTTP server | getFilter | identifier_name |
httpretty.go | httpretty.Logger{
// Time: true,
// TLS: true,
// RequestHeader: true,
// RequestBody: true,
// ResponseHeader: true,
// ResponseBody: true,
// }
//
// logger.Middleware(handler)
//
// Note: server logs don't include response headers set by the server.
// Client logs don't include request headers set by the HTTP client.
package httpretty
import (
"bytes"
"context"
"crypto/tls"
"encoding/json"
"errors"
"io"
"net/http"
"net/textproto"
"os"
"sync"
"github.com/henvic/httpretty/internal/color"
)
// Formatter can be used to format body.
//
// If the Format function returns an error, the content is printed verbatim after a warning.
// Match receives a media type from the Content-Type field. The body is formatted if it returns true.
type Formatter interface {
Match(mediatype string) bool
Format(w io.Writer, src []byte) error
}
// WithHide can be used to protect a request from being exposed.
func WithHide(ctx context.Context) context.Context {
return context.WithValue(ctx, contextHide{}, struct{}{})
}
// Logger provides a way for you to print client and server-side information about your HTTP traffic.
type Logger struct {
// SkipRequestInfo avoids printing a line showing the request URI on all requests plus a line
// containing the remote address on server-side requests.
SkipRequestInfo bool
// Time the request began and its duration.
Time bool
// TLS information, such as certificates and ciphers.
// BUG(henvic): Currently, the TLS information prints after the response header, although it
// should be printed before the request header.
TLS bool
// RequestHeader set by the client or received from the server.
RequestHeader bool
// RequestBody sent by the client or received by the server.
RequestBody bool
// ResponseHeader received by the client or set by the HTTP handlers.
ResponseHeader bool
// ResponseBody received by the client or set by the server.
ResponseBody bool
// SkipSanitize bypasses sanitizing headers containing credentials (such as Authorization).
SkipSanitize bool
// Colors set ANSI escape codes that terminals use to print text in different colors.
Colors bool
// Formatters for the request and response bodies.
// No standard formatters are used. You need to add what you want to use explicitly.
// We provide a JSONFormatter for convenience (add it manually).
Formatters []Formatter
// MaxRequestBody the logger can print.
// If value is not set and Content-Length is not sent, 4096 bytes is considered.
MaxRequestBody int64
// MaxResponseBody the logger can print.
// If value is not set and Content-Length is not sent, 4096 bytes is considered.
MaxResponseBody int64
mu sync.Mutex // ensures atomic writes; protects the following fields
w io.Writer
filter Filter
skipHeader map[string]struct{}
bodyFilter BodyFilter
flusher Flusher
}
// Filter allows you to skip requests.
//
// If an error happens and you want to log it, you can pass a non-nil error value.
type Filter func(req *http.Request) (skip bool, err error)
// BodyFilter allows you to skip printing a HTTP body based on its associated Header.
//
// It can be used for omitting HTTP Request and Response bodies.
// You can filter by checking properties such as Content-Type or Content-Length.
//
// On a HTTP server, this function is called even when no body is present due to
// http.Request always carrying a non-nil value.
type BodyFilter func(h http.Header) (skip bool, err error)
// Flusher defines how logger prints requests.
type Flusher int
// A Logger can print immediately without buffering, print each part of a request or response as soon as it is ready, or print everything only when the request is done.
const (
// NoBuffer strategy prints anything immediately, without buffering.
// It has the issue of mingling concurrent requests in unpredictable ways.
NoBuffer Flusher = iota
// OnReady buffers and prints each step of the request or response (header, body) whenever they are ready.
// It reduces the mingling caused by concurrent requests but does not give any ordering guarantee, so responses can still be out of order.
OnReady
// OnEnd buffers the whole request and flushes it once, in the end.
OnEnd
)
// SetFilter allows you to set a function to skip requests.
// Pass nil to remove the filter. This method is concurrency safe.
func (l *Logger) SetFilter(f Filter) {
l.mu.Lock()
defer l.mu.Unlock()
l.filter = f
}
// SkipHeader allows you to skip printing specific headers.
// This method is concurrency safe.
func (l *Logger) SkipHeader(headers []string) {
l.mu.Lock()
defer l.mu.Unlock()
m := map[string]struct{}{}
for _, h := range headers {
m[textproto.CanonicalMIMEHeaderKey(h)] = struct{}{}
}
l.skipHeader = m
}
// SetBodyFilter allows you to set a function to skip printing a body.
// Pass nil to remove the body filter. This method is concurrency safe.
func (l *Logger) SetBodyFilter(f BodyFilter) {
l.mu.Lock()
defer l.mu.Unlock()
l.bodyFilter = f
}
// SetOutput sets the output destination for the logger.
func (l *Logger) SetOutput(w io.Writer) {
l.mu.Lock()
defer l.mu.Unlock()
l.w = w
}
// SetFlusher sets the flush strategy for the logger.
func (l *Logger) SetFlusher(f Flusher) {
l.mu.Lock()
defer l.mu.Unlock()
l.flusher = f
}
func (l *Logger) getWriter() io.Writer {
if l.w == nil {
return os.Stdout
}
return l.w
}
func (l *Logger) getFilter() Filter {
l.mu.Lock()
f := l.filter
defer l.mu.Unlock()
return f
}
func (l *Logger) getBodyFilter() BodyFilter {
l.mu.Lock()
f := l.bodyFilter
defer l.mu.Unlock()
return f
}
func (l *Logger) cloneSkipHeader() map[string]struct{} {
l.mu.Lock()
skipped := l.skipHeader
l.mu.Unlock()
m := map[string]struct{}{}
for h := range skipped {
m[h] = struct{}{}
}
return m
}
type contextHide struct{}
type roundTripper struct {
logger *Logger
rt http.RoundTripper
}
// RoundTripper returns a RoundTripper that uses the logger.
func (l *Logger) RoundTripper(rt http.RoundTripper) http.RoundTripper {
return roundTripper{
logger: l,
rt: rt,
}
}
// RoundTrip implements the http.RoundTripper interface.
func (r roundTripper) RoundTrip(req *http.Request) (resp *http.Response, err error) {
tripper := r.rt
if tripper == nil {
// BUG(henvic): net/http data race condition when the client
// does concurrent requests using the very same HTTP transport.
// See Go standard library issue https://golang.org/issue/30597
tripper = http.RoundTripper(http.DefaultTransport)
}
l := r.logger
p := newPrinter(l)
defer p.flush()
if hide := req.Context().Value(contextHide{}); hide != nil || p.checkFilter(req) {
return tripper.RoundTrip(req)
}
var tlsClientConfig *tls.Config
if l.Time {
defer p.printTimeRequest()()
}
if !l.SkipRequestInfo {
p.printRequestInfo(req)
}
if transport, ok := tripper.(*http.Transport); ok && transport.TLSClientConfig != nil {
tlsClientConfig = transport.TLSClientConfig
if tlsClientConfig.InsecureSkipVerify {
p.printf("* Skipping TLS verification: %s\n",
p.format(color.FgRed, "connection is susceptible to man-in-the-middle attacks."))
}
}
if l.TLS && tlsClientConfig != nil {
// please remember http.Request.TLS is ignored by the HTTP client.
p.printOutgoingClientTLS(tlsClientConfig)
}
p.printRequest(req)
defer func() {
if err != nil {
p.printf("* %s\n", p.format(color.FgRed, err.Error()))
if resp == nil {
return
}
}
if l.TLS {
p.printTLSInfo(resp.TLS, false)
p.printTLSServer(req.Host, resp.TLS)
}
p.printResponse(resp)
}()
return tripper.RoundTrip(req)
}
// Middleware for logging incoming requests to a HTTP server.
func (l *Logger) Middleware(next http.Handler) http.Handler {
return httpHandler{
logger: l,
next: next,
}
}
type httpHandler struct {
logger *Logger
next http.Handler
}
// ServeHTTP is a middleware for logging incoming requests to a HTTP server.
func (h httpHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
l := h.logger
p := newPrinter(l)
defer p.flush()
if hide := req.Context().Value(contextHide{}); hide != nil || p.checkFilter(req) {
h.next.ServeHTTP(w, req)
return
}
if p.logger.Time {
defer p.printTimeRequest()()
}
if !p.logger.SkipRequestInfo | {
p.printRequestInfo(req)
} | conditional_block |
|
httpretty.go | any *http.Client
//
// if _, err := http.Get("https://www.google.com/"); err != nil {
// fmt.Fprintf(os.Stderr, "%+v\n", err)
// os.Exit(1)
// }
// }
//
// If you pass nil to the logger.RoundTripper it is going to fallback to http.DefaultTransport.
//
// You can use the logger quickly to log requests on your server. For example:
// logger := &httpretty.Logger{
// Time: true,
// TLS: true,
// RequestHeader: true,
// RequestBody: true,
// ResponseHeader: true,
// ResponseBody: true,
// }
//
// logger.Middleware(handler)
//
// Note: server logs don't include response headers set by the server.
// Client logs don't include request headers set by the HTTP client.
package httpretty
import (
"bytes"
"context"
"crypto/tls"
"encoding/json"
"errors"
"io"
"net/http"
"net/textproto"
"os"
"sync"
"github.com/henvic/httpretty/internal/color"
)
// Formatter can be used to format body.
//
// If the Format function returns an error, the content is printed verbatim after a warning.
// Match receives a media type from the Content-Type field. The body is formatted if it returns true.
type Formatter interface {
Match(mediatype string) bool
Format(w io.Writer, src []byte) error
}
// WithHide can be used to protect a request from being exposed.
func WithHide(ctx context.Context) context.Context {
return context.WithValue(ctx, contextHide{}, struct{}{})
}
// Logger provides a way for you to print client and server-side information about your HTTP traffic.
type Logger struct {
// SkipRequestInfo avoids printing a line showing the request URI on all requests plus a line
// containing the remote address on server-side requests.
SkipRequestInfo bool
// Time the request began and its duration.
Time bool
// TLS information, such as certificates and ciphers.
// BUG(henvic): Currently, the TLS information prints after the response header, although it
// should be printed before the request header.
TLS bool
// RequestHeader set by the client or received from the server.
RequestHeader bool
// RequestBody sent by the client or received by the server.
RequestBody bool
// ResponseHeader received by the client or set by the HTTP handlers.
ResponseHeader bool
// ResponseBody received by the client or set by the server.
ResponseBody bool
// SkipSanitize bypasses sanitizing headers containing credentials (such as Authorization).
SkipSanitize bool
// Colors set ANSI escape codes that terminals use to print text in different colors.
Colors bool
// Formatters for the request and response bodies.
// No standard formatters are used. You need to add what you want to use explicitly.
// We provide a JSONFormatter for convenience (add it manually).
Formatters []Formatter
// MaxRequestBody the logger can print.
// If value is not set and Content-Length is not sent, 4096 bytes is considered.
MaxRequestBody int64
// MaxResponseBody the logger can print.
// If value is not set and Content-Length is not sent, 4096 bytes is considered.
MaxResponseBody int64
mu sync.Mutex // ensures atomic writes; protects the following fields
w io.Writer
filter Filter
skipHeader map[string]struct{}
bodyFilter BodyFilter
flusher Flusher
}
// Filter allows you to skip requests.
//
// If an error happens and you want to log it, you can pass a non-nil error value.
type Filter func(req *http.Request) (skip bool, err error)
// BodyFilter allows you to skip printing a HTTP body based on its associated Header.
//
// It can be used for omitting HTTP Request and Response bodies.
// You can filter by checking properties such as Content-Type or Content-Length.
//
// On a HTTP server, this function is called even when no body is present due to
// http.Request always carrying a non-nil value.
type BodyFilter func(h http.Header) (skip bool, err error)
// Flusher defines how logger prints requests.
type Flusher int
// A Logger can print immediately without buffering, print each part of a request or response as soon as it is ready, or print everything only when the request is done.
const (
// NoBuffer strategy prints anything immediately, without buffering.
// It has the issue of mingling concurrent requests in unpredictable ways.
NoBuffer Flusher = iota
// OnReady buffers and prints each step of the request or response (header, body) whenever they are ready.
// It reduces the mingling caused by concurrent requests but does not give any ordering guarantee, so responses can still be out of order.
OnReady
// OnEnd buffers the whole request and flushes it once, in the end.
OnEnd
)
// SetFilter allows you to set a function to skip requests.
// Pass nil to remove the filter. This method is concurrency safe.
func (l *Logger) SetFilter(f Filter) {
l.mu.Lock()
defer l.mu.Unlock()
l.filter = f
}
// SkipHeader allows you to skip printing specific headers.
// This method is concurrency safe.
func (l *Logger) SkipHeader(headers []string) {
l.mu.Lock()
defer l.mu.Unlock()
m := map[string]struct{}{}
for _, h := range headers {
m[textproto.CanonicalMIMEHeaderKey(h)] = struct{}{}
}
l.skipHeader = m
}
// SetBodyFilter allows you to set a function to skip printing a body.
// Pass nil to remove the body filter. This method is concurrency safe.
func (l *Logger) SetBodyFilter(f BodyFilter) {
l.mu.Lock()
defer l.mu.Unlock()
l.bodyFilter = f
}
// SetOutput sets the output destination for the logger.
func (l *Logger) SetOutput(w io.Writer) {
l.mu.Lock()
defer l.mu.Unlock()
l.w = w
}
// SetFlusher sets the flush strategy for the logger.
func (l *Logger) SetFlusher(f Flusher) {
l.mu.Lock()
defer l.mu.Unlock()
l.flusher = f
}
func (l *Logger) getWriter() io.Writer {
if l.w == nil {
return os.Stdout
}
return l.w
}
func (l *Logger) getFilter() Filter {
l.mu.Lock()
f := l.filter
defer l.mu.Unlock()
return f
}
func (l *Logger) getBodyFilter() BodyFilter {
l.mu.Lock()
f := l.bodyFilter
defer l.mu.Unlock()
return f
}
func (l *Logger) cloneSkipHeader() map[string]struct{} {
l.mu.Lock()
skipped := l.skipHeader
l.mu.Unlock()
m := map[string]struct{}{}
for h := range skipped {
m[h] = struct{}{}
}
return m
}
type contextHide struct{}
type roundTripper struct {
logger *Logger
rt http.RoundTripper
}
// RoundTripper returns a RoundTripper that uses the logger.
func (l *Logger) RoundTripper(rt http.RoundTripper) http.RoundTripper |
// RoundTrip implements the http.RoundTripper interface.
func (r roundTripper) RoundTrip(req *http.Request) (resp *http.Response, err error) {
tripper := r.rt
if tripper == nil {
// BUG(henvic): net/http data race condition when the client
// does concurrent requests using the very same HTTP transport.
// See Go standard library issue https://golang.org/issue/30597
tripper = http.RoundTripper(http.DefaultTransport)
}
l := r.logger
p := newPrinter(l)
defer p.flush()
if hide := req.Context().Value(contextHide{}); hide != nil || p.checkFilter(req) {
return tripper.RoundTrip(req)
}
var tlsClientConfig *tls.Config
if l.Time {
defer p.printTimeRequest()()
}
if !l.SkipRequestInfo {
p.printRequestInfo(req)
}
if transport, ok := tripper.(*http.Transport); ok && transport.TLSClientConfig != nil {
tlsClientConfig = transport.TLSClientConfig
if tlsClientConfig.InsecureSkipVerify {
p.printf("* Skipping TLS verification: %s\n",
p.format(color.FgRed, "connection is susceptible to man-in-the-middle attacks."))
}
}
if l.TLS && tlsClientConfig != nil {
// please remember http.Request.TLS is ignored by the HTTP client.
p.printOutgoingClientTLS(tlsClientConfig)
}
p.printRequest(req)
defer func() {
if err != nil {
p.printf("* %s\n", p.format(color.FgRed, err.Error()))
if resp == nil {
return
}
}
if l.TLS {
p.printTLSInfo(resp.TLS, false)
p.printTLSServer(req.Host, resp.TLS)
}
p.printResponse(resp)
}()
return tripper.RoundTrip(req)
}
// Middleware for logging incoming requests to a HTTP server.
func (l *Logger) Middleware(next http.Handler) http.Handler {
return httpHandler{
logger: l,
next: next,
}
}
type httpHandler struct {
logger *Logger
next http.Handler
}
// ServeHTTP is a middleware for logging incoming requests to a HTTP server | {
return roundTripper{
logger: l,
rt: rt,
}
} | identifier_body |
httpretty.go | on any *http.Client
//
// if _, err := http.Get("https://www.google.com/"); err != nil {
// fmt.Fprintf(os.Stderr, "%+v\n", err)
// os.Exit(1)
// }
// }
//
// If you pass nil to the logger.RoundTripper it is going to fallback to http.DefaultTransport.
//
// You can use the logger quickly to log requests on your server. For example:
// logger := &httpretty.Logger{
// Time: true,
// TLS: true,
// RequestHeader: true,
// RequestBody: true,
// ResponseHeader: true,
// ResponseBody: true,
// }
//
// logger.Middleware(handler)
//
// Note: server logs don't include response headers set by the server.
// Client logs don't include request headers set by the HTTP client.
package httpretty
import (
"bytes"
"context"
"crypto/tls"
"encoding/json"
"errors"
"io"
"net/http"
"net/textproto" | "sync"
"github.com/henvic/httpretty/internal/color"
)
// Formatter can be used to format body.
//
// If the Format function returns an error, the content is printed verbatim after a warning.
// Match receives a media type from the Content-Type field. The body is formatted if it returns true.
type Formatter interface {
Match(mediatype string) bool
Format(w io.Writer, src []byte) error
}
// WithHide can be used to protect a request from being exposed.
func WithHide(ctx context.Context) context.Context {
return context.WithValue(ctx, contextHide{}, struct{}{})
}
// Logger provides a way for you to print client and server-side information about your HTTP traffic.
type Logger struct {
// SkipRequestInfo avoids printing a line showing the request URI on all requests plus a line
// containing the remote address on server-side requests.
SkipRequestInfo bool
// Time the request began and its duration.
Time bool
// TLS information, such as certificates and ciphers.
// BUG(henvic): Currently, the TLS information prints after the response header, although it
// should be printed before the request header.
TLS bool
// RequestHeader set by the client or received from the server.
RequestHeader bool
// RequestBody sent by the client or received by the server.
RequestBody bool
// ResponseHeader received by the client or set by the HTTP handlers.
ResponseHeader bool
// ResponseBody received by the client or set by the server.
ResponseBody bool
// SkipSanitize bypasses sanitizing headers containing credentials (such as Authorization).
SkipSanitize bool
// Colors set ANSI escape codes that terminals use to print text in different colors.
Colors bool
// Formatters for the request and response bodies.
// No standard formatters are used. You need to add what you want to use explicitly.
// We provide a JSONFormatter for convenience (add it manually).
Formatters []Formatter
// MaxRequestBody the logger can print.
// If value is not set and Content-Length is not sent, 4096 bytes is considered.
MaxRequestBody int64
// MaxResponseBody the logger can print.
// If value is not set and Content-Length is not sent, 4096 bytes is considered.
MaxResponseBody int64
mu sync.Mutex // ensures atomic writes; protects the following fields
w io.Writer
filter Filter
skipHeader map[string]struct{}
bodyFilter BodyFilter
flusher Flusher
}
// Filter allows you to skip requests.
//
// If an error happens and you want to log it, you can pass a non-nil error value.
type Filter func(req *http.Request) (skip bool, err error)
// BodyFilter allows you to skip printing a HTTP body based on its associated Header.
//
// It can be used for omitting HTTP Request and Response bodies.
// You can filter by checking properties such as Content-Type or Content-Length.
//
// On a HTTP server, this function is called even when no body is present due to
// http.Request always carrying a non-nil value.
type BodyFilter func(h http.Header) (skip bool, err error)
// Flusher defines how logger prints requests.
type Flusher int
// A Logger can print immediately without buffering, print each part of a request or response as soon as it is ready, or print everything only when the request is done.
const (
// NoBuffer strategy prints anything immediately, without buffering.
// It has the issue of mingling concurrent requests in unpredictable ways.
NoBuffer Flusher = iota
// OnReady buffers and prints each step of the request or response (header, body) whenever they are ready.
// It reduces the mingling caused by concurrent requests but does not give any ordering guarantee, so responses can still be out of order.
OnReady
// OnEnd buffers the whole request and flushes it once, in the end.
OnEnd
)
// SetFilter allows you to set a function to skip requests.
// Pass nil to remove the filter. This method is concurrency safe.
func (l *Logger) SetFilter(f Filter) {
l.mu.Lock()
defer l.mu.Unlock()
l.filter = f
}
// SkipHeader allows you to skip printing specific headers.
// This method is concurrency safe.
func (l *Logger) SkipHeader(headers []string) {
l.mu.Lock()
defer l.mu.Unlock()
m := map[string]struct{}{}
for _, h := range headers {
m[textproto.CanonicalMIMEHeaderKey(h)] = struct{}{}
}
l.skipHeader = m
}
// SetBodyFilter allows you to set a function to skip printing a body.
// Pass nil to remove the body filter. This method is concurrency safe.
func (l *Logger) SetBodyFilter(f BodyFilter) {
l.mu.Lock()
defer l.mu.Unlock()
l.bodyFilter = f
}
// SetOutput sets the output destination for the logger.
func (l *Logger) SetOutput(w io.Writer) {
l.mu.Lock()
defer l.mu.Unlock()
l.w = w
}
// SetFlusher sets the flush strategy for the logger.
func (l *Logger) SetFlusher(f Flusher) {
l.mu.Lock()
defer l.mu.Unlock()
l.flusher = f
}
func (l *Logger) getWriter() io.Writer {
if l.w == nil {
return os.Stdout
}
return l.w
}
func (l *Logger) getFilter() Filter {
l.mu.Lock()
f := l.filter
defer l.mu.Unlock()
return f
}
func (l *Logger) getBodyFilter() BodyFilter {
l.mu.Lock()
f := l.bodyFilter
defer l.mu.Unlock()
return f
}
func (l *Logger) cloneSkipHeader() map[string]struct{} {
l.mu.Lock()
skipped := l.skipHeader
l.mu.Unlock()
m := map[string]struct{}{}
for h := range skipped {
m[h] = struct{}{}
}
return m
}
type contextHide struct{}
type roundTripper struct {
logger *Logger
rt http.RoundTripper
}
// RoundTripper returns a RoundTripper that uses the logger.
func (l *Logger) RoundTripper(rt http.RoundTripper) http.RoundTripper {
return roundTripper{
logger: l,
rt: rt,
}
}
// RoundTrip implements the http.RoundTripper interface.
func (r roundTripper) RoundTrip(req *http.Request) (resp *http.Response, err error) {
tripper := r.rt
if tripper == nil {
// BUG(henvic): net/http data race condition when the client
// does concurrent requests using the very same HTTP transport.
// See Go standard library issue https://golang.org/issue/30597
tripper = http.RoundTripper(http.DefaultTransport)
}
l := r.logger
p := newPrinter(l)
defer p.flush()
if hide := req.Context().Value(contextHide{}); hide != nil || p.checkFilter(req) {
return tripper.RoundTrip(req)
}
var tlsClientConfig *tls.Config
if l.Time {
defer p.printTimeRequest()()
}
if !l.SkipRequestInfo {
p.printRequestInfo(req)
}
if transport, ok := tripper.(*http.Transport); ok && transport.TLSClientConfig != nil {
tlsClientConfig = transport.TLSClientConfig
if tlsClientConfig.InsecureSkipVerify {
p.printf("* Skipping TLS verification: %s\n",
p.format(color.FgRed, "connection is susceptible to man-in-the-middle attacks."))
}
}
if l.TLS && tlsClientConfig != nil {
// please remember http.Request.TLS is ignored by the HTTP client.
p.printOutgoingClientTLS(tlsClientConfig)
}
p.printRequest(req)
defer func() {
if err != nil {
p.printf("* %s\n", p.format(color.FgRed, err.Error()))
if resp == nil {
return
}
}
if l.TLS {
p.printTLSInfo(resp.TLS, false)
p.printTLSServer(req.Host, resp.TLS)
}
p.printResponse(resp)
}()
return tripper.RoundTrip(req)
}
// Middleware for logging incoming requests to a HTTP server.
func (l *Logger) Middleware(next http.Handler) http.Handler {
return httpHandler{
logger: l,
next: next,
}
}
type httpHandler struct {
logger *Logger
next http.Handler
}
// ServeHTTP is a middleware for logging incoming requests to a HTTP server.
| "os" | random_line_split |
fvs.py | () # Don't try to delete the label.
else: # Not a self-loop, but a multiple edge.
sole_neighbour = nbr1
sole_nbr_label = sole_neighbour['label']
if sole_nbr_label not in forbidden_set:
partial_fvs.append(sole_nbr_label)
else:
partial_fvs.append(next_label)
next_vertex.delete() # Don't try to delete the label.
if sole_nbr_label not in forbidden_set:
sole_neighbour = H.vs.find(label = sole_nbr_label)
neighbours = sole_neighbour.neighbors() | sole_neighbour.delete()
# discard = remove if present
small_degree_labels.discard(sole_nbr_label)
for neighbour_label in neighbour_labels:
if neighbour_label not in forbidden_set:
neighbour = H.vs.find(label = neighbour_label)
neighbour_degree = neighbour.degree()
if neighbour_degree <= 2:
small_degree_labels.add(neighbour_label)
else: # Our vertex has two distinct neighbours, so we
# bypass our vertex. Except if *both* the neighbours are
# in the forbidden set, in which case we don't do
# anything.
nbr1_label = nbr1['label']
nbr2_label = nbr2['label']
if (nbr1_label not in forbidden_set) or (nbr2_label not in forbidden_set):
next_vertex.delete() # Don't try to delete the label.
nbr1 = H.vs.find(label = nbr1_label)
nbr2 = H.vs.find(label = nbr2_label)
# Check if there is an existing edge between the
# two neighbours.
edge_id = H.get_eid(nbr1, nbr2, error = False)
if edge_id == -1: # There is no edge, so add one.
H.add_edge(nbr1, nbr2)
else: # There is already an edge. If this is a
# multiple edge, then check if our vertex
# deletion has made either of the two
# end-points to be of small degree, and update
# the set accordingly. Else add an edge to
# make it an edge of multiplicity 2.
edge = H.es[edge_id]
if edge.is_multiple():
for nbr in [nbr1, nbr2]:
neighbour_label = nbr['label']
if nbr.degree() <= 2 and \
neighbour_label not in \
forbidden_set :
small_degree_labels.add(neighbour_label)
else:
H.add_edge(nbr1, nbr2)
# Takes a graph G in igraph format and optionally a set of vertex
# labels of G as argument. Returns a tuple (partial_fvs,
# current_graph) where current_graph is the
# graph obtained by exhaustively applying the degree-0, 1, 2 rules
# to G, and partial_fvs is a set of vertices that these rules allow us
# to assume is contained in some minimum FVS of G.
#
# None of the vertices whose labels are present in the (optional)
# list forbidden_set will be deleted during the application of
# these rules. This list is empty by default.
#
# Whenever we need to refer to vertices we actually use their
# labels instead.
#
# This function first makes a deep copy of G so that the input
# instance is not modified.
# Calling it "reduct" to avoid conflicts with the built-in reduce()
def reduct(G, forbidden_set = []):
# Since we will do frequent membership checking:
forbidden_set = set(forbidden_set)
H = G.copy() # Make a deep copy so that we don't change G.
# We use deques below because we build these lists element by
# element, which is not very efficient with the list data
# structure in Python.
partial_fvs = deque() # The partial fvs that we have computed
# so far.
# We add to a set the labels of those vertices H whose degree
# is at most 2, provided they are not in forbidden_set. We
# need to add the labels, and not the vertices themselves,
# because the labels are the one invariant for each vertex
# object. This is an artifact of the way igraph implements
# vertices.
labels = (v['label'] for v in H.vs if v.degree() <= 2)
small_degree_labels = set(labels) - forbidden_set
# From now on, the set small_degree_labels must contain
# exactly those vertices of H whose degrees are at most 2 and
# which are not in forbidden_set.
# We now process, according to the degree-0,1,2 reduction
# rules, the vertices whose labels are in
# small_degree_labels. Whenever we make the degree of a vertex
# to be at most 2, and we can't process this vertex
# immediately, and the vertex is not in forbidden_set, we
# add the label of this vertex to small_degree_labels.
while small_degree_labels:
next_label = small_degree_labels.pop()
next_vertex = H.vs.find(label = next_label)
next_degree = next_vertex.degree()
if next_label not in forbidden_set:
if next_degree == 0 :
# Throw this vertex away. We don't delete the label
# of this vertex from the set of small-degree labels,
# because we have popped this label already.
next_vertex.delete()
elif next_degree == 1 :
degree_one_rule(next_vertex, forbidden_set,
small_degree_labels)
else: # We found a vertex of degree 2.
degree_two_rule(next_vertex, forbidden_set,
small_degree_labels, partial_fvs)
# We have now exhaustively applied the reduction rules, and
# can return the resulting partial FVS, the remaining
# graph, and the set of deleted vertices.
return(set(partial_fvs), H)
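# Illustrative sketch (not part of the original file): how reduct() behaves on a
# small, made-up example graph (a triangle with a pendant path attached).
def _reduct_demo():
    G = igraph.Graph(edges=[(0, 1), (1, 2), (2, 0), (2, 3), (3, 4)])
    G.vs['label'] = list(range(G.vcount()))
    partial_fvs, H = reduct(G)
    # The pendant path is pruned by the degree-0/1 rules, and the degree-2 rule
    # collapses the triangle into a multiple edge; this should leave H empty and
    # place exactly one of the triangle's vertices into partial_fvs.
    return partial_fvs, H.vcount()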
# The subsets of vertices (i.e. vertex labels) of the input graph
# that we have found so far to induce non-forest subgraphs of the
# input graph. This is for speeding up computation, seems to work.
cyclic_subsets = set()
# Return True if graph G is a forest, and False otherwise. This
# does not modify G.
def is_forest(G):
#return (not G.has_multiple()) and (not G.girth())
# Doing some memoization seems to make this noticeably faster.
global cyclic_subsets
vertex_labels = frozenset(v['label'] for v in G.vs)
if vertex_labels in cyclic_subsets:
return False
else:
isforest = (not G.has_multiple()) and (not G.girth())
if isforest:
return True
else:
cyclic_subsets.add(vertex_labels)
return False
# Finds and returns a lower bound for the size of an FVS of graph
# G. This lower bound is based on some simple arguments. We assume
# that the input graph has no vertices of degree up to 2.
def fvs_lower_bound(G):
n = G.vcount()
if n == 0: # Don't do anything fancy if the graph is already empty
return 0
else:
max_degree = G.maxdegree()
first_lower_bound = int((n + 2)/(max_degree + 1))
degrees = sorted(G.degree(), reverse = True) # Sorted non-increasing
min_degree = degrees[(n - 1)]
d = min_degree - 2
k = first_lower_bound
degree_sum = sum(degrees[0:k])
# A potential improvement on the lower bound
new_lower_bound = (d*n - degree_sum + 2)/d
if (new_lower_bound > k): # There is some actual improvement
# in the lower bound.
# Iterate as long as we have the inequality right
while (k < new_lower_bound): # No FVS of size at most k
k = k + 1 # Try the next k
degree_sum = sum(degrees[0:k])
#new_lower_bound = n - degree_sum + 2
new_lower_bound = (d*n - degree_sum + 2)/d
return k
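# Illustrative worked example (not part of the original file): for a 3-regular
# graph on n = 10 vertices such as the Petersen graph, the bound above gives
# int((10 + 2) / (3 + 1)) = 3, so every FVS has at least 3 vertices. With all
# degrees equal to 3 we get d = 1 and degree_sum = 9 for k = 3, so the refined
# bound (d*n - degree_sum + 2)/d = 3 does not improve on the first estimate.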
# Crude lower bound on fvs size, obtained by greedily packing
# cycles in the graph.
def approx_packing_number(G):
# Make a deep copy so that we don't mess up G.
H = G.copy()
packing_approx = 0
while H.has_multiple(): # The girth function does not see multiedges.
multi_edge = next(dropwhile(lambda e: not e.is_multiple(), H.es))
H.delete_vertices(list(multi_edge.tuple))
packing_approx += 1
H_girth_vertices = H.girth(True)
while H_girth_vertices: # While there are cycles left in H
H.delete_vertices(H_girth_vertices)
packing_approx += 1
H_girth_vertices = H.girth(True)
return packing_approx
# Pick a smallest cycle from G and return its vertex list. A
# multiple edge counts as a cycle. This function does not modify
# its argument.
def pick_smallest_cycle(G):
if G.has_multiple(): # The girth function does not see multiedges.
multi_edge = next(dropwhile(lambda e: not e.is_multiple(), G.es))
vertex_set = list | neighbour_labels = list(v['label'] for v in neighbours)
| random_line_split |
fvs.py |
global_lower_bound2 = approx_packing_number(G)
global_current_lower_bound = global_lower_bound1
if global_lower_bound2 > global_current_lower_bound:
global_current_lower_bound = global_lower_bound2
if global_current_lower_bound == global_current_upper_bound :
return global_current_fvs # We got lucky on this one.
graph = G
yes_part = set() # Vertex labels
no_part = set() # Vertex labels
state_list = deque([(graph, yes_part, no_part)])
while state_list:
(H, yes_part, no_part) = state_list.pop()
# The best fvs we could find so far.
local_current_fvs = greedy_approx(H)
local_current_upper_bound = len(local_current_fvs)
# Compute a lower bound for FVS. Cannot use the
# degree-based method because there is no guarantee that
# the minimum degree of H is at least 3, at this point.
local_current_lower_bound = approx_packing_number(H)
local_candidate_fvs = local_current_fvs | yes_part # Set union.
local_candidate_upper_bound = len(local_candidate_fvs)
if local_candidate_upper_bound == global_current_lower_bound:
return local_candidate_fvs # We got lucky on this one.
elif local_candidate_upper_bound < global_current_upper_bound:
#global_current_upper_bound < optimal_check_value and optimality_ticker >= optimality_check_threshold:
global_current_fvs = local_candidate_fvs
global_current_upper_bound = len(local_candidate_fvs)
# The greedy approximation did not get us a certifiably
# best possible upper bound for H, so we have to process
# it futher.
yes_part_size = len(yes_part)
# We process this state only if the yes-part is smaller
# than the current best fvs.
if yes_part_size < global_current_upper_bound: # and no_part_is_forest:
# This is an attempt at optimization. If we have
# already collected a sufficiently large partial fvs
# (the yes-part), check to see if the best that we can
# get from the remaining graph exceeds our remaining
# budget. If this happens, then we don't need to
# process this state and its children. This could
# result in large savings if we are lucky.
# We first apply the reduction rules.
(partial_fvs, H) = reduct(H, forbidden_set = no_part)
if yes_part_size >= (global_current_upper_bound/3): # TODO: Tweak this fraction
# Check if we can rule out this branch
# already. Seems to significantly improve performance.
# The remaining instance.
(rest_partial_fvs, rest_reduced_graph) = copy.deepcopy((partial_fvs, H))
# Compute a lower bound. This function does not
# modify its argument. We cannot use the
# degree-based bound, because rest_reduced_graph
# may have vertices (from no_part) of degree at
# most 2.
rest_lower_bound = approx_packing_number(rest_reduced_graph)
rest_lower_bound += len(rest_partial_fvs)
if (yes_part_size + rest_lower_bound) >= \
global_current_upper_bound:
continue
# Now we check if the partial solution that we have
# found so far, is already at least as big as our
# current best upper bound. If it is, then we abort
# this state.
yes_part = yes_part | partial_fvs # Set union-update
yes_part_size = len(yes_part)
if yes_part_size > global_current_upper_bound :
continue
# If, after the reduction, we have no more vertices in
# the non-no part of H, then the set we have picked so
# far is an FVS for the input graph. This is because
# H[no_part] is a forest.
H_is_trivial = (H.vcount() - len(no_part) == 0)
if H_is_trivial: # This means that we have found an FVS for the graph.
if yes_part_size < global_current_upper_bound:
global_current_upper_bound = yes_part_size
global_current_fvs = yes_part
if global_current_upper_bound == global_current_lower_bound:
return global_current_fvs
# If H is nontrivial at this point, then its vertices
# (other than some vertices in no_part, and some
# neighbours of the no_part) have minimum degree at
# least 3. We branch on the "non-no" vertices of a
# shortest cycle of H.
else:
# Get the label set of a smallest cycle in H.
C = pick_smallest_cycle(H)
girth = len(C)
# The label of the vertex from the cycle that we
# pick in our solution.
v = None
# The prefix of the cycle that we will push to the
# no-part.
new_nos = None
# Loop over the vertex labels of the cycle.
for index in range(girth):
v = C[index]
new_nos = C[:index]
# We don't want to pick a no-vertex into our
# solution.
if v not in no_part:
N_i = set(no_part)
N_i = N_i | set(new_nos)
# If H[N_i] is not a forest, then we stop
# processing this state and go to the
# next.
if not subgraph_is_forest(H, N_i):
continue
# A list of lists, where each list is the
# set of all labels of a component in
# H[N_i].
components = get_components(H, N_i)
promoted_labels = set() # Labels of vertices that are
# promoted to the yes-part because they
# see two or more vertices in a component
# of H[N_i].
all_labels = set(v['label'] for v in H.vs)
candidate_labels = all_labels - N_i
for candidate_label in candidate_labels:
candidate_vertex = H.vs.find(label = candidate_label)
neighbour_labels = set(x['label'] for x in candidate_vertex.neighbors())
for component in components:
if len(component & neighbour_labels) >= 2:
promoted_labels.add(candidate_label)
break
Y_i = copy.deepcopy(yes_part)
Y_i |= (set([v]) | promoted_labels)
if len(Y_i) >= global_current_upper_bound :
continue
H_i = H.copy()
H_i.delete_vertices(v for v in H_i.vs.select(label_in = Y_i))
state_list.append((H_i, Y_i, N_i))
return global_current_fvs
# To remember component label sets that we have already computed.
component_label_sets = dict()
# Return a list of lists, where each list is the set of all labels
# of a component in G[X] where X is the set of vertices of G
# corresponding to label_set. Does not modify its arguments.
def get_components(G, label_set):
global component_label_sets
component_lists = component_label_sets.get(frozenset(label_set))
if component_lists:
return component_lists
vertex_dict = dict(zip(G.vs.get_attribute_values('label'), G.vs.indices))
vertices = (G.vs[vertex_dict[l]] for l in label_set)
component_lists = list()
subgraph = G.induced_subgraph(vertices)
index_set = set(v.index for v in subgraph.vs)
while index_set:
temp_set = set()
root = index_set.pop()
for v in subgraph.bfsiter(root, igraph.ALL):
temp_set.add(v['label'])
others = set(subgraph.subcomponent(root)) - set([root])
for index in others:
index_set.remove(index)
component_lists.append(temp_set)
component_label_sets[frozenset(label_set)] = component_lists
return component_lists
# The simple greedy approximation algorithm. Returns an
# approximate FVS. Does not modify graph G.
def greedy_approx(G):
G = G.copy()
H = G.copy() # For a correctness check at the end.
approx_fvs = deque()
(partial_fvs, G) = reduct(G)
approx_fvs.extend(partial_fvs)
while G.vcount() : # The min-degree-3 reduction takes care of
# the base case and leaves an empty graph.
# Pick the vertex of the largest degree to include in the approximate fvs.
next_vertex = max_degree_vertex(G)
approx_fvs.append(next_vertex['label'])
G.delete_vertices([next_vertex])
(partial_fvs, G) = reduct(G)
approx_fvs.extend(partial_fvs)
approx_fvs_vertices = H.vs.select(label_in = approx_fvs)
H.delete_vertices(approx_fvs_vertices)
return set(approx_fvs)
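# Illustrative sketch (not part of the original file): greedy_approx() can be
# used on its own to obtain a quick upper bound before the exact search runs.
def _greedy_demo():
    G = igraph.Graph.Famous('Petersen')
    G.vs['label'] = list(range(G.vcount()))
    approx = greedy_approx(G)
    return len(approx)  # an upper bound on the size of a minimum FVS of G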
# Return a vertex of the maximum degree in graph G. This function
# does not modify G.
def max_degree_vertex(G):
max_degree = 0
max_vertex = None
for v in G.vs:
| v_degree = v.degree()
if v_degree > max_degree:
max_degree = v_degree
max_vertex = v | conditional_block |
|
fvs.py | () # Don't try to delete the label.
else: # Not a self-loop, but a multiple edge.
sole_neighbour = nbr1
sole_nbr_label = sole_neighbour['label']
if sole_nbr_label not in forbidden_set:
partial_fvs.append(sole_nbr_label)
else:
partial_fvs.append(next_label)
next_vertex.delete() # Don't try to delete the label.
if sole_nbr_label not in forbidden_set:
sole_neighbour = H.vs.find(label = sole_nbr_label)
neighbours = sole_neighbour.neighbors()
neighbour_labels = list(v['label'] for v in neighbours)
sole_neighbour.delete()
# discard = remove if present
small_degree_labels.discard(sole_nbr_label)
for neighbour_label in neighbour_labels:
if neighbour_label not in forbidden_set:
neighbour = H.vs.find(label = neighbour_label)
neighbour_degree = neighbour.degree()
if neighbour_degree <= 2:
small_degree_labels.add(neighbour_label)
else: # Our vertex has two distinct neighbours, so we
# bypass our vertex. Except if *both* the neighbours are
# in the forbidden set, in which case we don't do
# anything.
nbr1_label = nbr1['label']
nbr2_label = nbr2['label']
if (nbr1_label not in forbidden_set) or (nbr2_label not in forbidden_set):
next_vertex.delete() # Don't try to delete the label.
nbr1 = H.vs.find(label = nbr1_label)
nbr2 = H.vs.find(label = nbr2_label)
# Check if there is an existing edge between the
# two neighbours.
edge_id = H.get_eid(nbr1, nbr2, error = False)
if edge_id == -1: # There is no edge, so add one.
H.add_edge(nbr1, nbr2)
else: # There is already an edge. If this is a
# multiple edge, then check if our vertex
# deletion has made either of the two
# end-points to be of small degree, and update
# the set accordingly. Else add an edge to
# make it an edge of multiplicity 2.
edge = H.es[edge_id]
if edge.is_multiple():
for nbr in [nbr1, nbr2]:
neighbour_label = nbr['label']
if nbr.degree() <= 2 and \
neighbour_label not in \
forbidden_set :
small_degree_labels.add(neighbour_label)
else:
H.add_edge(nbr1, nbr2)
# Takes a graph G in igraph format and optionally a set of vertex
# labels of G as argument. Returns a tuple (partial_fvs,
# current_graph) where current_graph is the
# graph obtained by exhaustively applying the degree-0, 1, 2 rules
# to G, and partial_fvs is the set of those vertices that are forced
# into every FVS of G by these rules.
#
# None of the vertices whose labels are present in the (optional)
# list forbidden_set will be deleted during the application of
# these rules. This list is empty by default.
#
# Whenever we need to refer to vertices we actually use their
# labels instead.
#
# This function first makes a deep copy of G so that the input
# instance is not modified.
# Calling it "reduct" to avoid conflicts with the built-in reduce()
def reduct(G, forbidden_set = []):
# Since we will do frequent membership checking:
forbidden_set = set(forbidden_set)
H = G.copy() # Make a deep copy so that we don't change G.
# We use deques below because we build these lists element by
# element, which is not very efficient with the list data
# structure in Python.
partial_fvs = deque() # The partial fvs that we have computed
# so far.
# We add to a set the labels of those vertices H whose degree
# is at most 2, provided they are not in forbidden_set. We
# need to add the labels, and not the vertices themselves,
# because the labels are the one invariant for each vertex
# object. This is an artifact of the way igraph implements
# vertices.
labels = (v['label'] for v in H.vs if v.degree() <= 2)
small_degree_labels = set(labels) - forbidden_set
# From now on, the set small_degree_labels must contain
# exactly those vertices of H whose degrees are at most 2 and
# which are not in forbidden_set.
# We now process, according to the degree-0,1,2 reduction
# rules, the vertices whose labels are in
# small_degree_labels. Whenever we make the degree of a vertex
# to be at most 2, and we can't process this vertex
# immediately, and the vertex is not in forbidden_set, we
# add the label of this vertex to small_degree_labels.
while small_degree_labels:
next_label = small_degree_labels.pop()
next_vertex = H.vs.find(label = next_label)
next_degree = next_vertex.degree()
if next_label not in forbidden_set:
if next_degree == 0 :
# Throw this vertex away. We don't delete the label
# of this vertex from the set of small-degree labels,
# because we have popped this label already.
next_vertex.delete()
elif next_degree == 1 :
degree_one_rule(next_vertex, forbidden_set,
small_degree_labels)
else: # We found a vertex of degree 2.
degree_two_rule(next_vertex, forbidden_set,
small_degree_labels, partial_fvs)
# We have now exhaustively applied the reduction rules, and
# can return the resulting partial FVS, the remaining
# graph, and the set of deleted vertices.
return(set(partial_fvs), H)
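# Illustrative usage (a sketch, not part of the original module; assumes a
# graph whose vertices carry unique 'label' attributes, as everywhere else
# in this file):
#
# g = igraph.Graph.Ring(5) # a 5-cycle
# g.vs['label'] = [str(i) for i in range(5)]
# partial_fvs, reduced = reduct(g)
# # The degree-2 rule repeatedly bypasses vertices until only a multiple
# # edge remains, so partial_fvs contains exactly one label and the
# # reduced graph is empty -- the minimum FVS of a cycle has size 1.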
# The subsets of vertices (i.e. vertex labels) of the input graph
# that we have found so far to induce non-forest subgraphs of the
# input graph. This is for speeding up computation, seems to work.
cyclic_subsets = set()
# Return True if graph G is a forest, and False otherwise. This
# does not modify G.
def is_forest(G):
#return (not G.has_multiple()) and (not G.girth())
# Doing some memoization seems to make this really faster.
global cyclic_subsets
vertex_labels = frozenset(v['label'] for v in G.vs)
if vertex_labels in cyclic_subsets:
return False
else:
isforest = (not G.has_multiple()) and (not G.girth())
if isforest:
return True
else:
cyclic_subsets.add(vertex_labels)
return False
# Finds and returns a lower bound for the size of an FVS of graph
# G. This lower bound is based on some simple arguments. We assume
# that the input graph has no vertices of degree up to 2.
def fvs_lower_bound(G):
n = G.vcount()
if n == 0: # Don't do anything fancy if the graph is already empty
return 0
else:
max_degree = G.maxdegree()
first_lower_bound = int((n + 2)/(max_degree + 1))
degrees = sorted(G.degree(), reverse = True) # Sorted non-increasing
min_degree = degrees[(n - 1)]
d = min_degree - 2
k = first_lower_bound
degree_sum = sum(degrees[0:k])
# A potential improvement on the lower bound
new_lower_bound = (d*n - degree_sum + 2)/d
if (new_lower_bound > k): # There is some actual improvement
# in the lower bound.
# Iterate as long as we have the inequality right
while (k < new_lower_bound): # No FVS of size at most k
k = k + 1 # Try the next k
degree_sum = sum(degrees[0:k])
#new_lower_bound = n - degree_sum + 2
new_lower_bound = (d*n - degree_sum + 2)/d
return k
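# Worked example (illustrative, not part of the original module): for a
# 3-regular graph on 10 vertices such as the Petersen graph,
# first_lower_bound = int((10 + 2)/(3 + 1)) = 3, d = 3 - 2 = 1,
# degree_sum over the top 3 degrees = 9, and
# new_lower_bound = (1*10 - 9 + 2)/1 = 3, so there is no improvement and
# the function returns 3 (which happens to be the exact feedback vertex
# number of the Petersen graph).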
# Crude lower bound on fvs size, obtained by greedily packing
# cycles in the graph.
def approx_packing_number(G):
# Make a deep copy so that we don't mess up G.
|
# Pick a smallest cycle from G and return its vertex list. A
# multiple edge counts as a cycle. This function does not modify
# its argument.
def pick_smallest_cycle(G):
if G.has_multiple(): # The girth function does not see multiedges.
multi_edge = next(dropwhile(lambda e: not e.is_multiple(), G.es))
vertex_set = list | H = G.copy()
packing_approx = 0
while H.has_multiple(): # The girth function does not see multiedges.
multi_edge = next(dropwhile(lambda e: not e.is_multiple(), H.es))
H.delete_vertices(list(multi_edge.tuple))
packing_approx += 1
H_girth_vertices = H.girth(True)
while H_girth_vertices: # While there are cycles left in H
H.delete_vertices(H_girth_vertices)
packing_approx += 1
H_girth_vertices = H.girth(True)
return packing_approx | identifier_body |
fvs.py | () # Don't try to delete the label.
else: # Not a self-loop, but a multiple edge.
sole_neighbour = nbr1
sole_nbr_label = sole_neighbour['label']
if sole_nbr_label not in forbidden_set:
partial_fvs.append(sole_nbr_label)
else:
partial_fvs.append(next_label)
next_vertex.delete() # Don't try to delete the label.
if sole_nbr_label not in forbidden_set:
sole_neighbour = H.vs.find(label = sole_nbr_label)
neighbours = sole_neighbour.neighbors()
neighbour_labels = list(v['label'] for v in neighbours)
sole_neighbour.delete()
# discard = remove if present
small_degree_labels.discard(sole_nbr_label)
for neighbour_label in neighbour_labels:
if neighbour_label not in forbidden_set:
neighbour = H.vs.find(label = neighbour_label)
neighbour_degree = neighbour.degree()
if neighbour_degree <= 2:
small_degree_labels.add(neighbour_label)
else: # Our vertex has two distinct neighbours, so we
# bypass our vertex. Except if *both* the neighbours are
# in the forbidden set, in which case we don't do
# anything.
nbr1_label = nbr1['label']
nbr2_label = nbr2['label']
if (nbr1_label not in forbidden_set) or (nbr2_label not in forbidden_set):
next_vertex.delete() # Don't try to delete the label.
nbr1 = H.vs.find(label = nbr1_label)
nbr2 = H.vs.find(label = nbr2_label)
# Check if there is an existing edge between the
# two neighbours.
edge_id = H.get_eid(nbr1, nbr2, error = False)
if edge_id == -1: # There is no edge, so add one.
H.add_edge(nbr1, nbr2)
else: # There is already an edge. If this is a
# multiple edge, then check if our vertex
# deletion has made either of the two
# end-points to be of small degree, and update
# the set accordingly. Else add an edge to
# make it an edge of multiplicity 2.
edge = H.es[edge_id]
if edge.is_multiple():
for nbr in [nbr1, nbr2]:
neighbour_label = nbr['label']
if nbr.degree() <= 2 and \
neighbour_label not in \
forbidden_set :
small_degree_labels.add(neighbour_label)
else:
H.add_edge(nbr1, nbr2)
# Takes a graph G in igraph format and optionally a set of vertex
# labels of G as argument. Returns a tuple (partial_fvs,
# current_graph) where current_graph is the
# graph obtained by exhaustively applying the degree-0, 1, 2 rules
# to G, and partial_fvs is the set of those vertices that are forced
# into every FVS of G by these rules.
#
# None of the vertices whose labels are present in the (optional)
# list forbidden_set will be deleted during the application of
# these rules. This list is empty by default.
#
# Whenever we need to refer to vertices we actually use their
# labels instead.
#
# This function first makes a deep copy of G so that the input
# instance is not modified.
# Calling it "reduct" to avoid conflicts with the built-in reduce()
def reduct(G, forbidden_set = []):
# Since we will do frequent membership checking:
forbidden_set = set(forbidden_set)
H = G.copy() # Make a deep copy so that we don't change G.
# We use deques below because we build these lists element by
# element, which is not very efficient with the list data
# structure in Python.
partial_fvs = deque() # The partial fvs that we have computed
# so far.
# We add to a set the labels of those vertices H whose degree
# is at most 2, provided they are not in forbidden_set. We
# need to add the labels, and not the vertices themselves,
# because the labels are the one invariant for each vertex
# object. This is an artifact of the way igraph implements
# vertices.
labels = (v['label'] for v in H.vs if v.degree() <= 2)
small_degree_labels = set(labels) - forbidden_set
# From now on, the set small_degree_labels must contain
# exactly those vertices of H whose degrees are at most 2 and
# which are not in forbidden_set.
# We now process, according to the degree-0,1,2 reduction
# rules, the vertices whose labels are in
# small_degree_labels. Whenever we make the degree of a vertex
# to be at most 2, and we can't process this vertex
# immediately, and the vertex is not in forbidden_set, we
# add the label of this vertex to small_degree_labels.
while small_degree_labels:
next_label = small_degree_labels.pop()
next_vertex = H.vs.find(label = next_label)
next_degree = next_vertex.degree()
if next_label not in forbidden_set:
if next_degree == 0 :
# Throw this vertex away. We don't delete the label
# of this vertex from the set of small-degree labels,
# because we have popped this label already.
next_vertex.delete()
elif next_degree == 1 :
degree_one_rule(next_vertex, forbidden_set,
small_degree_labels)
else: # We found a vertex of degree 2.
degree_two_rule(next_vertex, forbidden_set,
small_degree_labels, partial_fvs)
# We have now exhaustively applied the reduction rules, and
# can return the resulting partial FVS, the remaining
# graph, and the set of deleted vertices.
return(set(partial_fvs), H)
# The subsets of vertices (i.e. vertex labels) of the input graph
# that we have found so far to induce non-forest subgraphs of the
# input graph. This is for speeding up computation, seems to work.
cyclic_subsets = set()
# Return True if graph G is a forest, and False otherwise. This
# does not modify G.
def is_forest(G):
#return (not G.has_multiple()) and (not G.girth())
# Doing some memoization seems to make this really faster.
global cyclic_subsets
vertex_labels = frozenset(v['label'] for v in G.vs)
if vertex_labels in cyclic_subsets:
return False
else:
isforest = (not G.has_multiple()) and (not G.girth())
if isforest:
return True
else:
cyclic_subsets.add(vertex_labels)
return False
# Finds and returns a lower bound for the size of an FVS of graph
# G. This lower bound is based on some simple arguments. We assume
# that the input graph has no vertices of degree up to 2.
def | (G):
n = G.vcount()
if n == 0: # Don't do anything fancy if the graph is already empty
return 0
else:
max_degree = G.maxdegree()
first_lower_bound = int((n + 2)/(max_degree + 1))
degrees = sorted(G.degree(), reverse = True) # Sorted non-increasing
min_degree = degrees[(n - 1)]
d = min_degree - 2
k = first_lower_bound
degree_sum = sum(degrees[0:k])
# A potential improvement on the lower bound
new_lower_bound = (d*n - degree_sum + 2)/d
if (new_lower_bound > k): # There is some actual improvement
# in the lower bound.
# Iterate as long as we have the inequality right
while (k < new_lower_bound): # No FVS of size at most k
k = k + 1 # Try the next k
degree_sum = sum(degrees[0:k])
#new_lower_bound = n - degree_sum + 2
new_lower_bound = (d*n - degree_sum + 2)/d
return k
# Crude lower bound on fvs size, obtained by greedily packing
# cycles in the graph.
def approx_packing_number(G):
# Make a deep copy so that we don't mess up G.
H = G.copy()
packing_approx = 0
while H.has_multiple(): # The girth function does not see multiedges.
multi_edge = next(dropwhile(lambda e: not e.is_multiple(), H.es))
H.delete_vertices(list(multi_edge.tuple))
packing_approx += 1
H_girth_vertices = H.girth(True)
while H_girth_vertices: # While there are cycles left in H
H.delete_vertices(H_girth_vertices)
packing_approx += 1
H_girth_vertices = H.girth(True)
return packing_approx
# Pick a smallest cycle from G and return its vertex list. A
# multiple edge counts as a cycle. This function does not modify
# its argument.
def pick_smallest_cycle(G):
if G.has_multiple(): # The girth function does not see multiedges.
multi_edge = next(dropwhile(lambda e: not e.is_multiple(), G.es))
vertex_set = | fvs_lower_bound | identifier_name |
renderer_go_func.go | BodyRenderer[T Importer] struct {
r *GoFuncRenderer[T]
}
)
// Returns sets up a return tuple of the function.
//
// We don't distinguish between fmt.Stringer and string here, except stringers from
// the [types] or [ast] libraries.
//
// Arguments are treated almost the same way as for function/method calls, it can be:
// - missing at all
// - a single instance of Params or *Params
// - a single instance of Commas or *Commas
// - a single instance of *types.Tuple.
// - a list of *types.Var.
// - a list of (K₁, V₁, K₂, V₂, ..., Kₙ, Vₙ), where *types.Var values are not allowed
// for Ks and Vs; Ks must be strings or stringers and Vs must be strings,
// stringers or *types.Tuple values,
// and each Kᵢ value or String() result can either be empty or a regular name.
// - a list of (T₁, T₂, …, T₂ₙ₋₁) composed entirely of strings or fmt.Stringers with
// the last value being empty string (or .String() method returning an empty string)
//
// It may produce zero value expressions for a return statement,
// but this depends on whether this call could deduce
// their values from the types. It puts the zero expressions into the rendering
// context under the ReturnZeroValues name.
//
// Specifics:
//
// - If the last argument type is the error, "zero value" of the
// last return type is empty. It is because we mostly need them
// (zero values) to return an error, where we will be setting an
// expression for the last return value (error) ourselves.
// - Zero values depend on return types. We can only rely on
// text matching heuristics if types are represented as strings.
// We wouldn't have much trouble with types.Type or ast.Type though.
// In case if our return values are named, "zeroes" will be just
// these names. Except the case of "_" names of course, where we
// will use heuristics again.
//
// Raw text heuristics rules:
// - Builtin types like int, uint32, bool, string, etc are supported,
// even though they may be shadowed somehow. We just guess
// they weren't and this is acceptable for most cases.
// - Chans, maps, slices, pointers are supported too.
// - Error type is matched by its name, same guess as for builtins
// here.
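//
// An illustrative call (a sketch that mirrors the usage example given for M
// elsewhere in this file; the names used here are hypothetical):
//
//	r.M("t", "*Type")("Greet")("name string").
//		Returns("string", "error", "").
//		Body(func(r *GoRenderer[T]) {
//			r.L(`return "hello " + name, nil`)
//		})
//
// The trailing empty string marks the list as plain result types (the
// T₁ … T₂ₙ₋₁ form above), so both results stay unnamed; since the last type
// is error, the computed ReturnZeroValues leaves the error slot empty.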
func (r *GoFuncRenderer[T]) Returns(results ...any) *GoFuncBodyRenderer[T] {
var zeroes [ |
switch len(results) {
case 0:
case 1:
switch v := results[0].(type) {
case Params:
r.checkSeqsUniq("argument", "arguments", v.commasSeq)
zeroes = heuristics.ZeroGuesses(v.data, nil)
r.results = v.data
case *Params:
r.checkSeqsUniq("argument", "arguments", v.commasSeq)
r.results = v.data
zeroes = heuristics.ZeroGuesses(v.data, nil)
case Commas:
r.results = v.data
zeroes = heuristics.ZeroGuesses(v.data, nil)
case *Commas:
r.results = v.data
zeroes = heuristics.ZeroGuesses(v.data, nil)
case *types.Tuple:
// We guess it is just an existing tuple from source code
// that has to be correct, so leave it as is.
// Note: append to zeroes itself; appending to a separate empty slice
// would keep only the last zero value.
for i := 0; i < v.Len(); i++ {
p := v.At(i)
r.takeVarName("argument", p.Name())
r.results = append(r.results, [2]string{p.Name(), r.r.Type(p.Type())})
zeroes = append(zeroes, zeroValueOfTypesType(r.r, p.Type(), i == v.Len()-1))
}
case string, fmt.Stringer:
r.results, _ = r.inPlaceSeq("argument", results...)
default:
panic(fmt.Errorf("unsupported result literal type %T", results[0]))
}
default:
r.results, zeroes = r.inPlaceSeq("argument", results...)
}
// Check if all zero values were computed and save ReturnZeroValues
zeroesAreValid := true
for i, zero := range zeroes {
if zero == "" {
zeroesAreValid = false
break
}
if i == len(zeroes)-1 && zero == consts.ErrorTypeZeroSign {
zeroes[i] = ""
break
}
}
if zeroesAreValid && len(zeroes) == len(r.results) {
r.r.SetReturnZeroValues(zeroes...)
}
return &GoFuncBodyRenderer[T]{
r: r,
}
}
// Body renders the function/method body.
func (r *GoFuncRenderer[T]) Body(f func(r *GoRenderer[T])) {
br := GoFuncBodyRenderer[T]{
r: r,
}
br.Body(f)
}
// Body renders function body with the provided f function.
func (r *GoFuncBodyRenderer[T]) Body(f func(r *GoRenderer[T])) {
var buf strings.Builder
buf.WriteString("func ")
if r.r.rcvr != nil {
buf.WriteByte('(')
buf.WriteString(r.r.r.S(*r.r.rcvr))
buf.WriteString(") ")
}
buf.WriteString(r.r.r.S(r.r.name))
buf.WriteByte('(')
for i, p := range r.r.params {
if i > 0 {
buf.WriteString(", ")
}
buf.WriteString(r.r.r.S(p[0]))
buf.WriteByte(' ')
buf.WriteString(r.r.r.S(p[1]))
}
buf.WriteString(") ")
if len(r.r.results) > 0 {
buf.WriteByte('(')
for i, p := range r.r.results {
if i > 0 {
buf.WriteString(", ")
}
buf.WriteString(r.r.r.S(p[0]))
buf.WriteByte(' ')
buf.WriteString(r.r.r.S(p[1]))
}
buf.WriteString(") ")
}
buf.WriteByte('{')
r.r.r.R(buf.String())
f(r.r.r)
r.r.r.R("}")
}
func (r *GoFuncRenderer[T]) kind() string {
if r.rcvr != nil {
return "method"
}
return "function"
}
func (r *GoFuncRenderer[T]) setFuncInfo(name string, params ...any) {
checkName(r.kind(), name)
r.name = name
switch len(params) {
case 0:
case 1:
switch v := params[0].(type) {
case Params:
r.checkSeqsUniq("argument", "arguments", v.commasSeq)
r.params = v.data
case *Params:
r.checkSeqsUniq("argument", "arguments", v.commasSeq)
r.params = v.data
case Commas:
r.params = v.data
case *Commas:
r.params = v.data
case *types.Tuple:
// We guess it is just an existing tuple from source code
// that has to be correct, so leave it as is.
for i := 0; i < v.Len(); i++ {
p := v.At(i)
r.takeVarName("argument", p.Name())
r.params = append(r.params, [2]string{p.Name(), r.r.Type(p.Type())})
}
case string, fmt.Stringer:
r.params, _ = r.inPlaceSeq("argument", params...)
default:
panic(fmt.Errorf("unsupported parameter literal type %T", params[0]))
}
default:
r.params, _ = r.inPlaceSeq("argument", params...)
}
}
func (r *GoFuncRenderer[T]) setReceiverInfo(rcvr ...any) {
var rn string
var rt string
switch len(rcvr) {
case 1:
switch v := rcvr[0].(type) {
case string:
rt = v
case *types.Var:
r.takeVarName("receiver", v.Name())
rn = v.Name()
rt = r.r.Type(v.Type())
case fmt.Stringer:
rt = v.String()
case types.Type:
rt = r.r.Type(v)
case ast.Type:
rt = r.r.Proto(v).Impl()
default:
panic(fmt.Sprintf(
"single receiver value type can be string|fmt.String|%T|%T|%T, got %T",
new(types.Var),
types.Type(nil),
ast.Type(nil),
rcvr[0],
))
}
case 2:
switch v := rcvr[0].(type) {
case string:
rn = v
case fmt.Stringer:
rn = v.String()
default:
panic(fmt.Sprintf("receiver name can be either string or fmt.Stringer, got %T", rcvr[0]))
}
r.takeVarName("receiver", rn)
switch v := rcvr[1].(type) {
case string:
rt = v
case fmt.Stringer:
rt = v.String()
case types.Type:
rt | ]string | identifier_name |
renderer_go_func.go | 32, bool, string, etc are supported,
// even though they may be shadowed somehow. We just guess
// they weren't and this is acceptable for most cases.
// - Chans, maps, slices, pointers are supported too.
// - Error type is matched by its name, same guess as for builtins
// here.
func (r *GoFuncRenderer[T]) Returns(results ...any) *GoFuncBodyRenderer[T] {
var zeroes []string
switch len(results) {
case 0:
case 1:
switch v := results[0].(type) {
case Params:
r.checkSeqsUniq("argument", "arguments", v.commasSeq)
zeroes = heuristics.ZeroGuesses(v.data, nil)
r.results = v.data
case *Params:
r.checkSeqsUniq("argument", "arguments", v.commasSeq)
r.results = v.data
zeroes = heuristics.ZeroGuesses(v.data, nil)
case Commas:
r.results = v.data
zeroes = heuristics.ZeroGuesses(v.data, nil)
case *Commas:
r.results = v.data
zeroes = heuristics.ZeroGuesses(v.data, nil)
case *types.Tuple:
// We guess it is just an existing tuple from source code
// that has to be correct, so leave it as is.
// Note: append to zeroes itself; appending to a separate empty slice
// would keep only the last zero value.
for i := 0; i < v.Len(); i++ {
p := v.At(i)
r.takeVarName("argument", p.Name())
r.results = append(r.results, [2]string{p.Name(), r.r.Type(p.Type())})
zeroes = append(zeroes, zeroValueOfTypesType(r.r, p.Type(), i == v.Len()-1))
}
case string, fmt.Stringer:
r.results, _ = r.inPlaceSeq("argument", results...)
default:
panic(fmt.Errorf("unsupported result literal type %T", results[0]))
}
default:
r.results, zeroes = r.inPlaceSeq("argument", results...)
}
// Check if all zero values were computed and save ReturnZeroValues
zeroesAreValid := true
for i, zero := range zeroes {
if zero == "" {
zeroesAreValid = false
break
}
if i == len(zeroes)-1 && zero == consts.ErrorTypeZeroSign {
zeroes[i] = ""
break
}
}
if zeroesAreValid && len(zeroes) == len(r.results) {
r.r.SetReturnZeroValues(zeroes...)
}
return &GoFuncBodyRenderer[T]{
r: r,
}
}
// Body renders the function/method body.
func (r *GoFuncRenderer[T]) Body(f func(r *GoRenderer[T])) {
br := GoFuncBodyRenderer[T]{
r: r,
}
br.Body(f)
}
// Body renders function body with the provided f function.
func (r *GoFuncBodyRenderer[T]) Body(f func(r *GoRenderer[T])) {
var buf strings.Builder
buf.WriteString("func ")
if r.r.rcvr != nil {
buf.WriteByte('(')
buf.WriteString(r.r.r.S(*r.r.rcvr))
buf.WriteString(") ")
}
buf.WriteString(r.r.r.S(r.r.name))
buf.WriteByte('(')
for i, p := range r.r.params {
if i > 0 {
buf.WriteString(", ")
}
buf.WriteString(r.r.r.S(p[0]))
buf.WriteByte(' ')
buf.WriteString(r.r.r.S(p[1]))
}
buf.WriteString(") ")
if len(r.r.results) > 0 {
buf.WriteByte('(')
for i, p := range r.r.results {
if i > 0 {
buf.WriteString(", ")
}
buf.WriteString(r.r.r.S(p[0]))
buf.WriteByte(' ')
buf.WriteString(r.r.r.S(p[1]))
}
buf.WriteString(") ")
}
buf.WriteByte('{')
r.r.r.R(buf.String())
f(r.r.r)
r.r.r.R("}")
}
func (r *GoFuncRenderer[T]) kind() string {
if r.rcvr != nil {
return "method"
}
return "function"
}
func (r *GoFuncRenderer[T]) setFuncInfo(name string, params ...any) {
checkName(r.kind(), name)
r.name = name
switch len(params) {
case 0:
case 1:
switch v := params[0].(type) {
case Params:
r.checkSeqsUniq("argument", "arguments", v.commasSeq)
r.params = v.data
case *Params:
r.checkSeqsUniq("argument", "arguments", v.commasSeq)
r.params = v.data
case Commas:
r.params = v.data
case *Commas:
r.params = v.data
case *types.Tuple:
// We guess it is just an existing tuple from source code
// that has to be correct, so leave it as is.
for i := 0; i < v.Len(); i++ {
p := v.At(i)
r.takeVarName("argument", p.Name())
r.params = append(r.params, [2]string{p.Name(), r.r.Type(p.Type())})
}
case string, fmt.Stringer:
r.params, _ = r.inPlaceSeq("argument", params...)
default:
panic(fmt.Errorf("unsupported parameter literal type %T", params[0]))
}
default:
r.params, _ = r.inPlaceSeq("argument", params...)
}
}
func (r *GoFuncRenderer[T]) setReceiverInfo(rcvr ...any) {
var rn string
var rt string
switch len(rcvr) {
case 1:
switch v := rcvr[0].(type) {
case string:
rt = v
case *types.Var:
r.takeVarName("receiver", v.Name())
rn = v.Name()
rt = r.r.Type(v.Type())
case fmt.Stringer:
rt = v.String()
case types.Type:
rt = r.r.Type(v)
case ast.Type:
rt = r.r.Proto(v).Impl()
default:
panic(fmt.Sprintf(
"single receiver value type can be string|fmt.String|%T|%T|%T, got %T",
new(types.Var),
types.Type(nil),
ast.Type(nil),
rcvr[0],
))
}
case 2:
switch v := rcvr[0].(type) {
case string:
rn = v
case fmt.Stringer:
rn = v.String()
default:
panic(fmt.Sprintf("receiver name can be either string or fmt.Stringer, got %T", rcvr[0]))
}
r.takeVarName("receiver", rn)
switch v := rcvr[1].(type) {
case string:
rt = v
case fmt.Stringer:
rt = v.String()
case types.Type:
rt = r.r.Type(v)
case ast.Type:
rt = r.r.Proto(v).Impl()
default:
panic(fmt.Sprintf(
"receiver type parameter can be string|fmt.String|%T|%T, got %T",
types.Type(nil),
ast.Type(nil),
rcvr[0],
))
}
default:
panic(fmt.Sprintf("receiver data length can be either 1 or 2, got %d", len(rcvr)))
}
receiver := rn + " " + rt
r.rcvr = &receiver
}
func (r *GoFuncRenderer[T]) inPlaceSeq(what string, tuples ...any) ([][2]string, []string) {
if len(tuples) == 0 {
return nil, nil
}
defer func() {
p := recover()
if p == nil {
return
}
panic(fmt.Sprintf("build %s %s %s: %v", r.kind(), r.name, what, p))
}()
// Check which form of arguments we were given.
if _, isVar := tuples[0].(*types.Var); isVar {
return r.varArguments("argument", tuples...)
} else {
return r.semiManualArguments("argument", tuples...)
}
}
func (r *GoFuncRenderer[T]) varArguments(what string, params ...any) (res [][2]string, zeroes []string) {
checker := tupleNamesChecker{
what: what,
plural: what + "s",
empties: 0,
nonEmpties: 0,
}
for i, param := range params {
p, ok := param.(*types.Var)
if !ok {
panic(fmt.Sprintf(
"process | parameter index %d: expected it to be %T got %T",
i,
new(types.Var),
param,
))
}
checker.reg(p.Name())
r.takeVarName(what, p.Name())
res = append(res, [2]string{p.Name(), r.r.Type(p.Type())})
zeroes = append(zeroes, zeroValueOfTypesType(r.r, p.Type(), i == len(params)-1))
}
return
}
func (r *GoFuncRenderer[T]) semiManualArguments(what string, para | conditional_block |
|
renderer_go_func.go | Renderer[T Importer] struct {
r *GoFuncRenderer[T]
}
)
// Returns sets up a return tuple of the function.
//
// We don't distinguish between fmt.Stringer and string here, except stringers from
// the [types] or [ast] libraries.
//
// Arguments are treated almost the same way as for function/method calls, it can be:
// - missing at all
// - a single instance of Params or *Params
// - a single instance of Commas or *Commas
// - a single instance of *types.Tuple.
// - a list of *types.Var.
// - a list of (K₁, V₁, K₂, V₂, ..., Kₙ, Vₙ), where *types.Var values are not allowed
// for Ks and Vs; Ks must be strings or stringers and Vs must be strings,
// stringers or *types.Tuple values,
// and each Kᵢ value or String() result can either be empty or a regular name.
// - a list of (T₁, T₂, …, T₂ₙ₋₁) composed entirely of strings or fmt.Stringers with
// the last value being empty string (or .String() method returning an empty string)
//
// It may produce zero value expressions for a return statement,
// but this depends on whether this call could deduce
// their values from the types. It puts the zero expressions into the rendering
// context under the ReturnZeroValues name.
//
// Specifics:
//
// - If the last argument type is the error, "zero value" of the
// last return type is empty. It is because we mostly need them
// (zero values) to return an error, where we will be setting an
// expression for the last return value (error) ourselves.
// - Zero values depend on return types. We can only rely on
// text matching heuristics if types are represented as strings.
// We wouldn't have much trouble with types.Type or ast.Type though.
// In case if our return values are named, "zeroes" will be just
// these names. Except the case of "_" names of course, where we
// will use heuristics again.
//
// Raw text heuristics rules:
// - Builtin types like int, uint32, bool, string, etc are supported,
// even though they may be shadowed somehow. We just guess
// they weren't and this is acceptable for most cases.
// - Chans, maps, slices, pointers are supported too.
// - Error type is matched by its name, same guess as for builtins
// here.
func (r *GoFuncRenderer[T]) Returns(results ...any) *GoFuncBodyRenderer[T] {
var zeroes []string
switch len(results) {
case 0:
case 1:
switch v := results[0].(type) {
case Params:
r.checkSeqsUniq("argument", "arguments", v.commasSeq)
zeroes = heuristics.ZeroGuesses(v.data, nil)
r.results = v.data
case *Params:
r.checkSeqsUniq("argument", "arguments", v.commasSeq)
r.results = v.data
zeroes = heuristics.ZeroGuesses(v.data, nil)
case Commas:
r.results = v.data
zeroes = heuristics.ZeroGuesses(v.data, nil)
case *Commas:
r.results = v.data
zeroes = heuristics.ZeroGuesses(v.data, nil)
case *types.Tuple:
// We guess it is just an existing tuple from source code
// that has to be correct, so leave it as is.
// Note: append to zeroes itself; appending to a separate empty slice
// would keep only the last zero value.
for i := 0; i < v.Len(); i++ {
p := v.At(i)
r.takeVarName("argument", p.Name())
r.results = append(r.results, [2]string{p.Name(), r.r.Type(p.Type())})
zeroes = append(zeroes, zeroValueOfTypesType(r.r, p.Type(), i == v.Len()-1))
}
case string, fmt.Stringer:
r.results, _ = r.inPlaceSeq("argument", results...)
default:
panic(fmt.Errorf("unsupported result literal type %T", results[0]))
}
default:
r.results, zeroes = r.inPlaceSeq("argument", results...)
}
// Check if all zero values were computed and save ReturnZeroValues
zeroesAreValid := true
for i, zero := range zeroes {
if zero == "" {
zeroesAreValid = false
break
}
if i == len(zeroes)-1 && zero == consts.ErrorTypeZeroSign {
zeroes[i] = ""
break
}
}
if zeroesAreValid && len(zeroes) == len(r.results) {
r.r.SetReturnZeroValues(zeroes...)
}
return &GoFuncBodyRenderer[T]{
r: r,
}
}
// Body renders the function/method body.
func (r *GoFuncRenderer[T]) Body(f func(r *GoRenderer[T])) {
br := GoFuncBodyRenderer[T]{
r: r,
}
br.Body(f)
}
// Body renders function body with the provided f function.
func (r *GoFuncBodyRenderer[T]) Body(f func(r *GoRenderer[T])) {
var buf strings.Builder
buf.WriteString("func ")
if r.r.rcvr != nil {
buf.WriteByte('(')
buf.WriteString(r.r.r.S(*r.r.rcvr))
buf.WriteString(") ")
}
buf.WriteString(r.r.r.S(r.r.name))
buf.WriteByte('(')
for i, p := range r.r.params {
if i > 0 {
buf.WriteString(", ")
}
buf.WriteString(r.r.r.S(p[0]))
buf.WriteByte(' ')
buf.WriteString(r.r.r.S(p[1]))
}
buf.WriteString(") ")
if len(r.r.results) > 0 {
buf.WriteByte('(')
for i, p := range r.r.results {
if i > 0 {
buf.WriteString(", ")
}
buf.WriteString(r.r.r.S(p[0]))
buf.WriteByte(' ')
buf.WriteString(r.r.r.S(p[1]))
}
buf.WriteString(") ")
}
buf.WriteByte('{')
r.r.r.R(buf.String())
f(r.r.r)
r.r.r.R("}")
}
func (r *GoFuncRenderer[T]) kind() string {
if r.rcvr != nil {
return "method"
}
return "function" | ...any) {
checkName(r.kind(), name)
r.name = name
switch len(params) {
case 0:
case 1:
switch v := params[0].(type) {
case Params:
r.checkSeqsUniq("argument", "arguments", v.commasSeq)
r.params = v.data
case *Params:
r.checkSeqsUniq("argument", "arguments", v.commasSeq)
r.params = v.data
case Commas:
r.params = v.data
case *Commas:
r.params = v.data
case *types.Tuple:
// We guess it is just an existing tuple from source code
// that has to be correct, so leave it as is.
for i := 0; i < v.Len(); i++ {
p := v.At(i)
r.takeVarName("argument", p.Name())
r.params = append(r.params, [2]string{p.Name(), r.r.Type(p.Type())})
}
case string, fmt.Stringer:
r.params, _ = r.inPlaceSeq("argument", params...)
default:
panic(fmt.Errorf("unsupported parameter literal type %T", params[0]))
}
default:
r.params, _ = r.inPlaceSeq("argument", params...)
}
}
func (r *GoFuncRenderer[T]) setReceiverInfo(rcvr ...any) {
var rn string
var rt string
switch len(rcvr) {
case 1:
switch v := rcvr[0].(type) {
case string:
rt = v
case *types.Var:
r.takeVarName("receiver", v.Name())
rn = v.Name()
rt = r.r.Type(v.Type())
case fmt.Stringer:
rt = v.String()
case types.Type:
rt = r.r.Type(v)
case ast.Type:
rt = r.r.Proto(v).Impl()
default:
panic(fmt.Sprintf(
"single receiver value type can be string|fmt.String|%T|%T|%T, got %T",
new(types.Var),
types.Type(nil),
ast.Type(nil),
rcvr[0],
))
}
case 2:
switch v := rcvr[0].(type) {
case string:
rn = v
case fmt.Stringer:
rn = v.String()
default:
panic(fmt.Sprintf("receiver name can be either string or fmt.Stringer, got %T", rcvr[0]))
}
r.takeVarName("receiver", rn)
switch v := rcvr[1].(type) {
case string:
rt = v
case fmt.Stringer:
rt = v.String()
case types.Type:
|
}
func (r *GoFuncRenderer[T]) setFuncInfo(name string, params | identifier_body |
renderer_go_func.go | // So, the usage of this method will be like
// r.M("t", "*Type")("Name")("ctx $ctx.Context").Returns("string", "error, "").Body(func(…) {
// r.L(`return $ZeroReturnValue $errs.New("error")`)
// })
// Producing this code
// func (t *Type) Name(ctx context.Context) (string, error) {
// return "", errors.New("error")
// }
func (r *GoRenderer[T]) M(rcvr ...any) func(name string) func(params ...any) *GoFuncRenderer[T] {
return func(name string) func(params ...any) *GoFuncRenderer[T] {
res := &GoFuncRenderer[T]{
r: r.Scope(),
rcvr: nil,
params: nil,
results: nil,
}
res.setReceiverInfo(rcvr...)
return func(params ...any) *GoFuncRenderer[T] {
res.setFuncInfo(name, params...)
return res
}
}
}
type (
// GoFuncRenderer renders definitions of functions and methods.
GoFuncRenderer[T Importer] struct {
r *GoRenderer[T]
rcvr *string
name string
params [][2]string
results [][2]string
}
// GoFuncBodyRenderer renders function/method body.
GoFuncBodyRenderer[T Importer] struct {
r *GoFuncRenderer[T]
}
)
// Returns sets up a return tuple of the function.
//
// We don't distinguish between fmt.Stringer and string here, except stringers from
// the [types] or [ast] libraries.
//
// Arguments are treated almost the same way as for function/method calls, it can be:
// - missing at all
// - a single instance of Params or *Params
// - a single instance of Commas or *Commas
// - a single instance of *types.Tuple.
// - a list of *types.Var.
// - a list of (K₁, V₁, K₂, V₂, ..., Kₙ, Vₙ), where *types.Var values are not allowed
// for Ks and Vs; Ks must be strings or stringers and Vs must be strings,
// stringers or *types.Tuple values,
// and each Kᵢ value or String() result can either be empty or a regular name.
// - a list of (T₁, T₂, …, T₂ₙ₋₁) composed entirely of strings or fmt.Stringers with
// the last value being empty string (or .String() method returning an empty string)
//
// It may produce zero value expressions for a return statement,
// but this depends on whether this call could deduce
// their values from the types. It puts the zero expressions into the rendering
// context under the ReturnZeroValues name.
//
// Specifics:
//
// - If the last argument type is the error, "zero value" of the
// last return type is empty. It is because we mostly need them
// (zero values) to return an error, where we will be setting an
// expression for the last return value (error) ourselves.
// - Zero values depend on return types. We can only rely on
// text matching heuristics if types are represented as strings.
// We wouldn't have much trouble with types.Type or ast.Type though.
// In case if our return values are named, "zeroes" will be just
// these names. Except the case of "_" names of course, where we
// will use heuristics again.
//
// Raw text heuristics rules:
// - Builtin types like int, uint32, bool, string, etc are supported,
// even though they may be shadowed somehow. We just guess
// they weren't and this is acceptable for most cases.
// - Chans, maps, slices, pointers are supported too.
// - Error type is matched by its name, same guess as for builtins
// here.
func (r *GoFuncRenderer[T]) Returns(results ...any) *GoFuncBodyRenderer[T] {
var zeroes []string
switch len(results) {
case 0:
case 1:
switch v := results[0].(type) {
case Params:
r.checkSeqsUniq("argument", "arguments", v.commasSeq)
zeroes = heuristics.ZeroGuesses(v.data, nil)
r.results = v.data
case *Params:
r.checkSeqsUniq("argument", "arguments", v.commasSeq)
r.results = v.data
zeroes = heuristics.ZeroGuesses(v.data, nil)
case Commas:
r.results = v.data
zeroes = heuristics.ZeroGuesses(v.data, nil)
case *Commas:
r.results = v.data
zeroes = heuristics.ZeroGuesses(v.data, nil)
case *types.Tuple:
// We guess it is just an existing tuple from source code
// that has to be correct, so leave it as is.
// Note: append to zeroes itself; appending to a separate empty slice
// would keep only the last zero value.
for i := 0; i < v.Len(); i++ {
p := v.At(i)
r.takeVarName("argument", p.Name())
r.results = append(r.results, [2]string{p.Name(), r.r.Type(p.Type())})
zeroes = append(zeroes, zeroValueOfTypesType(r.r, p.Type(), i == v.Len()-1))
}
case string, fmt.Stringer:
r.results, _ = r.inPlaceSeq("argument", results...)
default:
panic(fmt.Errorf("unsupported result literal type %T", results[0]))
}
default:
r.results, zeroes = r.inPlaceSeq("argument", results...)
}
// Check if all zero values were computed and save ReturnZeroValues
zeroesAreValid := true
for i, zero := range zeroes {
if zero == "" {
zeroesAreValid = false
break
}
if i == len(zeroes)-1 && zero == consts.ErrorTypeZeroSign {
zeroes[i] = ""
break
}
}
if zeroesAreValid && len(zeroes) == len(r.results) {
r.r.SetReturnZeroValues(zeroes...)
}
return &GoFuncBodyRenderer[T]{
r: r,
}
}
// Body renders the function/method body.
func (r *GoFuncRenderer[T]) Body(f func(r *GoRenderer[T])) {
br := GoFuncBodyRenderer[T]{
r: r,
}
br.Body(f)
}
// Body renders function body with the provided f function.
func (r *GoFuncBodyRenderer[T]) Body(f func(r *GoRenderer[T])) {
var buf strings.Builder
buf.WriteString("func ")
if r.r.rcvr != nil {
buf.WriteByte('(')
buf.WriteString(r.r.r.S(*r.r.rcvr))
buf.WriteString(") ")
}
buf.WriteString(r.r.r.S(r.r.name))
buf.WriteByte('(')
for i, p := range r.r.params {
if i > 0 {
buf.WriteString(", ")
}
buf.WriteString(r.r.r.S(p[0]))
buf.WriteByte(' ')
buf.WriteString(r.r.r.S(p[1]))
}
buf.WriteString(") ")
if len(r.r.results) > 0 {
buf.WriteByte('(')
for i, p := range r.r.results {
if i > 0 {
buf.WriteString(", ")
}
buf.WriteString(r.r.r.S(p[0]))
buf.WriteByte(' ')
buf.WriteString(r.r.r.S(p[1]))
}
buf.WriteString(") ")
}
buf.WriteByte('{')
r.r.r.R(buf.String())
f(r.r.r)
r.r.r.R("}")
}
func (r *GoFuncRenderer[T]) kind() string {
if r.rcvr != nil {
return "method"
}
return "function"
}
func (r *GoFuncRenderer[T]) setFuncInfo(name string, params ...any) {
checkName(r.kind(), name)
r.name = name
switch len(params) {
case 0:
case 1:
switch v := params[0].(type) {
case Params:
r.checkSeqsUniq("argument", "arguments", v.commasSeq)
r.params = v.data
case *Params:
r.checkSeqsUniq("argument", "arguments", v.commasSeq)
r.params = v.data
case Commas:
r.params = v.data
case *Commas:
r.params = v.data
case *types.Tuple:
// We guess it is just an existing tuple from source code
// that has to be correct, so leave it as is.
for i := 0; i < v.Len(); i++ {
p := v.At(i)
r.takeVarName("argument", p.Name())
r.params = append(r.params, [2]string{p.Name(), r.r.Type(p.Type())})
}
case string, fmt.Stringer:
r.params, _ = r.inPlaceSeq("argument", params...)
default:
panic(fmt.Errorf("unsupported parameter literal type %T", params[0]))
}
default:
r.params, _ = r.inPlaceSeq("argument", params | //
// The return value is a function with a signature whose semantics matches F.
// | random_line_split |
|
compressor_params.rs | struct CompressorParams(pub *mut sys::CompressorParams);
impl Default for CompressorParams {
fn default() -> Self {
Self::new()
}
}
impl CompressorParams {
/// Create a compressor with default options
pub fn new() -> Self {
unsafe {
let mut params = CompressorParams(sys::compressor_params_new());
params.set_default_options();
params
}
}
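// Illustrative setup sketch (not part of the original file; loading the
// actual pixel data depends on CompressorImageRef's API and is elided):
//
// let mut params = CompressorParams::new();
// params.set_basis_format(BasisTextureFormat::UASTC4x4);
// params.set_generate_mipmaps(true);
// let mut image = params.source_image_mut(0);
// // ... fill `image` with RGBA data here ...
// // `params` is then handed to this crate's compressor type for encoding.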
/// Resets the compressor params to default state
pub fn reset(&mut self) |
// The default options that are applied when creating a new compressor or calling reset() on it
fn set_default_options(&mut self) {
// Set a default quality level. Leaving this unset results in undefined behavior, so we set
// it to a working value by default
self.set_etc1s_quality_level(crate::ETC1S_QUALITY_DEFAULT);
self.set_uastc_quality_level(crate::UASTC_QUALITY_DEFAULT);
// The library by default prints to stdout, but since this is a library we should disable
// that by default
self.set_print_status_to_stdout(false);
}
//
// These functions are used to load image data into the compressor
//
/// Get a reference to the source image at the given index. The internal list of source images
/// is resized as needed such that the image will exist
pub fn source_image_mut(
&mut self,
image_index: u32,
) -> CompressorImageRef {
unsafe {
CompressorImageRef(sys::compressor_params_get_or_create_source_image(
self.0,
image_index,
))
}
}
/// Resizes the source image list. If the provided length is shorter than the list, the data
/// beyond the provided length is truncated.
pub fn resize_source_image_list(
&mut self,
size: u32,
) {
unsafe {
sys::compressor_params_resize_source_image_list(self.0, size as _);
}
}
/// Resets the image list to be zero-length
pub fn clear_source_image_list(&mut self) {
unsafe {
sys::compressor_params_clear_source_image_list(self.0);
}
}
//
// These set parameters for compression
//
/// Enable stdout logging
pub fn set_print_status_to_stdout(
&mut self,
print_status_to_stdout: bool,
) {
unsafe { sys::compressor_params_set_status_output(self.0, print_status_to_stdout) }
}
/// Set ETC1S quality level. The value MUST be >= [ETC1S_QUALITY_MIN](crate::ETC1S_QUALITY_MIN)
/// and <= [ETC1S_QUALITY_MAX](crate::ETC1S_QUALITY_MAX).
pub fn set_etc1s_quality_level(
&mut self,
quality_level: u32,
) {
assert!(quality_level >= crate::ETC1S_QUALITY_MIN);
assert!(quality_level <= crate::ETC1S_QUALITY_MAX);
unsafe {
sys::compressor_params_set_quality_level(self.0, quality_level as i32);
}
}
/// Sets UASTC quality level. The value MUST be >= [UASTC_QUALITY_MIN](crate::UASTC_QUALITY_MIN)
/// and <= [UASTC_QUALITY_MAX](crate::UASTC_QUALITY_MAX).
pub fn set_uastc_quality_level(
&mut self,
quality_level: u32,
) {
assert!(quality_level >= crate::UASTC_QUALITY_MIN);
assert!(quality_level <= crate::UASTC_QUALITY_MAX);
unsafe {
let mut flags = sys::compressor_params_get_pack_uastc_flags(self.0);
flags |= quality_level;
sys::compressor_params_set_pack_uastc_flags(self.0, flags);
}
}
/// Use the global codebook to compress the image. Slightly smaller files, but lower quality,
/// slower encoding
pub fn set_use_global_codebook(
&mut self,
use_global_codebook: bool,
) {
unsafe {
sys::compressor_params_set_global_sel_pal(self.0, use_global_codebook);
}
}
/// Automatically use virtual selector palettes on small images for slightly smaller files
/// (defaults to off for faster encoding time)
pub fn set_auto_use_global_codebook(
&mut self,
auto_use_global_codebook: bool,
) {
unsafe {
sys::compressor_params_set_auto_global_sel_pal(self.0, auto_use_global_codebook);
}
}
/// Set the basis format we will compress to. See basis documentation for details. This
/// corresponds to the -uastc flag in the basisu command line tool and the m_uastc boolean param
/// on `basis_compressor_params` in the original library
///
/// UASTC encoding results in significantly higher texture quality, but larger files.
pub fn set_basis_format(
&mut self,
basis_format: BasisTextureFormat,
) {
let is_uastc = match basis_format {
BasisTextureFormat::ETC1S => false,
BasisTextureFormat::UASTC4x4 => true,
};
unsafe {
sys::compressor_params_set_uastc(self.0, is_uastc);
}
}
/// Sets the color space the images to be compressed is encoded in
///
/// Setting a linear color space will:
/// * Use linear colorspace metrics (instead of the default sRGB)
/// * By default use linear (not sRGB) mipmap filtering
pub fn set_color_space(
&mut self,
color_space: ColorSpace,
) {
let perceptual = match color_space {
ColorSpace::Linear => false,
ColorSpace::Srgb => true,
};
unsafe {
sys::compressor_params_set_perceptual(self.0, perceptual);
}
}
/// Override the mipmap generation color space behavior. This function is not necessary to call
/// if you call [set_color_space] with the correct value.
///
/// * If the color space is sRGB, convert image to linear before filtering, then back to sRGB
/// * If the color space is linear, we keep the image in linear during mipmap filtering
/// (i.e. do not convert to/from sRGB for filtering purposes)
pub fn set_mip_color_space(
&mut self,
color_space: ColorSpace,
) {
let mip_srgb = match color_space {
ColorSpace::Linear => false,
ColorSpace::Srgb => true,
};
unsafe {
sys::compressor_params_set_mip_srgb(self.0, mip_srgb);
}
}
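// Sketch (illustrative): data that is authored in linear space, e.g. a
// normal map, would typically be configured like this so that neither the
// encoder metrics nor the mipmap filtering apply sRGB conversions:
//
// params.set_color_space(ColorSpace::Linear);
// params.set_generate_mipmaps(true);
// // set_mip_color_space is only needed to *override* the behaviour derived
// // from set_color_space, so it can be omitted here.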
/// Disable backend's selector rate distortion optimizations (slightly faster, less noisy
/// output, but lower quality per output bit)
pub fn set_no_selector_rdo(
&mut self,
no_selector_rdo: bool,
) {
unsafe {
sys::compressor_params_set_no_selector_rdo(self.0, no_selector_rdo);
}
}
/// Disable backend's endpoint rate distortion optimizations (slightly faster, less noisy
/// output, but lower quality per output bit)
pub fn set_no_endpoint_rdo(
&mut self,
no_endpoint_rdo: bool,
) {
unsafe {
sys::compressor_params_set_no_endpoint_rdo(self.0, no_endpoint_rdo);
}
}
/// Enable/disable UASTC RDO post-processing and set UASTC RDO quality scalar to X. Lower
/// values=higher quality/larger LZ compressed files, higher values=lower quality/smaller LZ
/// compressed files. Good range to try is [.2-4]
pub fn set_rdo_uastc(
&mut self,
rdo_uastc_quality_scalar: Option<f32>,
) {
unsafe {
match rdo_uastc_quality_scalar {
Some(quality_scalar) => {
sys::compressor_params_set_rdo_uastc(self.0, true);
sys::compressor_params_set_rdo_uastc_quality_scalar(self.0, quality_scalar);
}
None => {
sys::compressor_params_set_rdo_uastc(self.0, false);
}
}
}
}
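// Sketch (illustrative): RDO post-processing is toggled through the Option;
// Some(scalar) enables it with the given quality scalar, None disables it.
//
// params.set_rdo_uastc(Some(1.0)); // trade some quality for smaller LZ-compressed output
// params.set_rdo_uastc(None); // disable UASTC RDO entirely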
/// Generate mipmaps for each source image
///
/// By default, sRGB textures will be converted from sRGB to linear before mipmap filtering.
/// This can be changed by calling [set_color_space] or [set_mip_color_space]
pub fn set_generate_mipmaps(
&mut self,
generate_mipmaps: bool,
) {
unsafe {
sys::compressor_params_set_generate_mipmaps(self.0, generate_mipmaps);
}
}
/// Sets the smallest dimension mipmap that will be generated
pub fn set_mipmap_smallest_dimension(
&mut self,
smallest_dimension: u32,
) {
unsafe {
sys::compressor_params_set_mip_smallest_dimension(self.0, smallest_dimension as _);
}
}
/// Set arbitrary userdata to be included with the basis-universal binary data
pub fn set_userdata(
| {
unsafe {
sys::compressor_params_clear(self.0);
self.set_default_options();
self.clear_source_image_list();
}
} | identifier_body |
compressor_params.rs | pub struct CompressorParams(pub *mut sys::CompressorParams);
impl Default for CompressorParams {
fn default() -> Self {
Self::new()
}
}
impl CompressorParams {
/// Create a compressor with default options
pub fn new() -> Self {
unsafe {
let mut params = CompressorParams(sys::compressor_params_new());
params.set_default_options();
params
}
}
/// Resets the compressor params to default state
pub fn reset(&mut self) {
unsafe {
sys::compressor_params_clear(self.0);
self.set_default_options();
self.clear_source_image_list();
}
}
// The default options that are applied when creating a new compressor or calling reset() on it
fn set_default_options(&mut self) {
// Set a default quality level. Leaving this unset results in undefined behavior, so we set
// it to a working value by default
self.set_etc1s_quality_level(crate::ETC1S_QUALITY_DEFAULT);
self.set_uastc_quality_level(crate::UASTC_QUALITY_DEFAULT);
// The library by default prints to stdout, but since this is a library we should disable
// that by default
self.set_print_status_to_stdout(false);
}
//
// These functions are used to load image data into the compressor
//
/// Get a reference to the source image at the given index. The internal list of source images
/// is resized as needed such that the image will exist
pub fn source_image_mut(
&mut self,
image_index: u32,
) -> CompressorImageRef {
unsafe {
CompressorImageRef(sys::compressor_params_get_or_create_source_image(
self.0,
image_index,
))
}
}
/// Resizes the source image list. If the provided length is shorter than the list, the data
/// beyond the provided length is truncated.
pub fn resize_source_image_list(
&mut self,
size: u32,
) {
unsafe {
sys::compressor_params_resize_source_image_list(self.0, size as _);
}
}
/// Resets the image list to be zero-length
pub fn clear_source_image_list(&mut self) {
unsafe {
sys::compressor_params_clear_source_image_list(self.0);
}
}
//
// These set parameters for compression
//
/// Enable stdout logging
pub fn | (
&mut self,
print_status_to_stdout: bool,
) {
unsafe { sys::compressor_params_set_status_output(self.0, print_status_to_stdout) }
}
/// Set ETC1S quality level. The value MUST be >= [ETC1S_QUALITY_MIN](crate::ETC1S_QUALITY_MIN)
/// and <= [ETC1S_QUALITY_MAX](crate::ETC1S_QUALITY_MAX).
pub fn set_etc1s_quality_level(
&mut self,
quality_level: u32,
) {
assert!(quality_level >= crate::ETC1S_QUALITY_MIN);
assert!(quality_level <= crate::ETC1S_QUALITY_MAX);
unsafe {
sys::compressor_params_set_quality_level(self.0, quality_level as i32);
}
}
/// Sets UASTC quality level. The value MUST be >= [UASTC_QUALITY_MIN](crate::UASTC_QUALITY_MIN)
/// and <= [UASTC_QUALITY_MAX](crate::UASTC_QUALITY_MAX).
pub fn set_uastc_quality_level(
&mut self,
quality_level: u32,
) {
assert!(quality_level >= crate::UASTC_QUALITY_MIN);
assert!(quality_level <= crate::UASTC_QUALITY_MAX);
unsafe {
let mut flags = sys::compressor_params_get_pack_uastc_flags(self.0);
flags |= quality_level;
sys::compressor_params_set_pack_uastc_flags(self.0, flags);
}
}
/// Use the global codebook to compress the image. Slightly smaller files, but lower quality,
/// slower encoding
pub fn set_use_global_codebook(
&mut self,
use_global_codebook: bool,
) {
unsafe {
sys::compressor_params_set_global_sel_pal(self.0, use_global_codebook);
}
}
/// Automatically use virtual selector palettes on small images for slightly smaller files
/// (defaults to off for faster encoding time)
pub fn set_auto_use_global_codebook(
&mut self,
auto_use_global_codebook: bool,
) {
unsafe {
sys::compressor_params_set_auto_global_sel_pal(self.0, auto_use_global_codebook);
}
}
/// Set the basis format we will compress to. See basis documentation for details. This
/// corresponds to the -uastc flag in the basisu command line tool and the m_uastc boolean param
/// on `basis_compressor_params` in the original library
///
/// UASTC encoding results in significantly higher texture quality, but larger files.
pub fn set_basis_format(
&mut self,
basis_format: BasisTextureFormat,
) {
let is_uastc = match basis_format {
BasisTextureFormat::ETC1S => false,
BasisTextureFormat::UASTC4x4 => true,
};
unsafe {
sys::compressor_params_set_uastc(self.0, is_uastc);
}
}
/// Sets the color space the images to be compressed is encoded in
///
/// Setting a linear color space will:
/// * Use linear colorspace metrics (instead of the default sRGB)
/// * By default use linear (not sRGB) mipmap filtering
pub fn set_color_space(
&mut self,
color_space: ColorSpace,
) {
let perceptual = match color_space {
ColorSpace::Linear => false,
ColorSpace::Srgb => true,
};
unsafe {
sys::compressor_params_set_perceptual(self.0, perceptual);
}
}
/// Override the mipmap generation color space behavior. This function is not necessary to call
/// if you call [set_color_space] with the correct value.
///
/// * If the color space is sRGB, convert image to linear before filtering, then back to sRGB
/// * If the color space is linear, we keep the image in linear during mipmap filtering
/// (i.e. do not convert to/from sRGB for filtering purposes)
pub fn set_mip_color_space(
&mut self,
color_space: ColorSpace,
) {
let mip_srgb = match color_space {
ColorSpace::Linear => false,
ColorSpace::Srgb => true,
};
unsafe {
sys::compressor_params_set_mip_srgb(self.0, mip_srgb);
}
}
/// Disable backend's selector rate distortion optimizations (slightly faster, less noisy
/// output, but lower quality per output bit)
pub fn set_no_selector_rdo(
&mut self,
no_selector_rdo: bool,
) {
unsafe {
sys::compressor_params_set_no_selector_rdo(self.0, no_selector_rdo);
}
}
/// Disable backend's endpoint rate distortion optimizations (slightly faster, less noisy
/// output, but lower quality per output bit)
pub fn set_no_endpoint_rdo(
&mut self,
no_endpoint_rdo: bool,
) {
unsafe {
sys::compressor_params_set_no_endpoint_rdo(self.0, no_endpoint_rdo);
}
}
/// Enable/disable UASTC RDO post-processing and set UASTC RDO quality scalar to X. Lower
/// values=higher quality/larger LZ compressed files, higher values=lower quality/smaller LZ
/// compressed files. Good range to try is [.2-4]
pub fn set_rdo_uastc(
&mut self,
rdo_uastc_quality_scalar: Option<f32>,
) {
unsafe {
match rdo_uastc_quality_scalar {
Some(quality_scalar) => {
sys::compressor_params_set_rdo_uastc(self.0, true);
sys::compressor_params_set_rdo_uastc_quality_scalar(self.0, quality_scalar);
}
None => {
sys::compressor_params_set_rdo_uastc(self.0, false);
}
}
}
}
/// Generate mipmaps for each source image
///
/// By default, sRGB textures will be converted from sRGB to linear before mipmap filtering.
/// This can be changed by calling [set_color_space] or [set_mip_color_space]
pub fn set_generate_mipmaps(
&mut self,
generate_mipmaps: bool,
) {
unsafe {
sys::compressor_params_set_generate_mipmaps(self.0, generate_mipmaps);
}
}
/// Sets the smallest dimension mipmap that will be generated
pub fn set_mipmap_smallest_dimension(
&mut self,
smallest_dimension: u32,
) {
unsafe {
sys::compressor_params_set_mip_smallest_dimension(self.0, smallest_dimension as _);
}
}
/// Set arbitrary userdata to be included with the basis-universal binary data
pub fn set_userdata(
| set_print_status_to_stdout | identifier_name |
compressor_params.rs | pub struct CompressorParams(pub *mut sys::CompressorParams);
impl Default for CompressorParams {
fn default() -> Self {
Self::new()
}
}
impl CompressorParams {
/// Create a compressor with default options
pub fn new() -> Self {
unsafe {
let mut params = CompressorParams(sys::compressor_params_new());
params.set_default_options();
params
}
}
/// Resets the compressor params to default state
pub fn reset(&mut self) {
unsafe {
sys::compressor_params_clear(self.0);
self.set_default_options();
self.clear_source_image_list();
}
}
// The default options that are applied when creating a new compressor or calling reset() on it
fn set_default_options(&mut self) {
// Set a default quality level. Leaving this unset results in undefined behavior, so we set
// it to a working value by default
self.set_etc1s_quality_level(crate::ETC1S_QUALITY_DEFAULT);
self.set_uastc_quality_level(crate::UASTC_QUALITY_DEFAULT);
// The library by default prints to stdout, but since this is a library we should disable
// that by default
self.set_print_status_to_stdout(false);
}
//
// These functions are used to load image data into the compressor
//
/// Get a reference to the source image at the given index. The internal list of source images
/// is resized as needed such that the image will exist
pub fn source_image_mut(
&mut self,
image_index: u32,
) -> CompressorImageRef {
unsafe {
CompressorImageRef(sys::compressor_params_get_or_create_source_image(
self.0,
image_index,
))
}
}
/// Resizes the source image list. If the provided length is shorter than the list, the data
/// beyond the provided length is truncated.
pub fn resize_source_image_list(
&mut self,
size: u32,
) {
unsafe {
sys::compressor_params_resize_source_image_list(self.0, size as _);
}
}
/// Resets the image list to be zero-length
pub fn clear_source_image_list(&mut self) {
unsafe {
sys::compressor_params_clear_source_image_list(self.0);
}
}
//
// These set parameters for compression
//
/// Enable stdout logging
pub fn set_print_status_to_stdout(
&mut self, | }
/// Set ETC1S quality level. The value MUST be >= [ETC1S_QUALITY_MIN](crate::ETC1S_QUALITY_MIN)
/// and <= [ETC1S_QUALITY_MAX](crate::ETC1S_QUALITY_MAX).
pub fn set_etc1s_quality_level(
&mut self,
quality_level: u32,
) {
assert!(quality_level >= crate::ETC1S_QUALITY_MIN);
assert!(quality_level <= crate::ETC1S_QUALITY_MAX);
unsafe {
sys::compressor_params_set_quality_level(self.0, quality_level as i32);
}
}
/// Sets UASTC quality level. The value MUST be >= [UASTC_QUALITY_MIN](crate::UASTC_QUALITY_MIN)
/// and <= [UASTC_QUALITY_MAX](crate::UASTC_QUALITY_MAX).
pub fn set_uastc_quality_level(
&mut self,
quality_level: u32,
) {
assert!(quality_level >= crate::UASTC_QUALITY_MIN);
assert!(quality_level <= crate::UASTC_QUALITY_MAX);
unsafe {
let mut flags = sys::compressor_params_get_pack_uastc_flags(self.0);
flags |= quality_level;
sys::compressor_params_set_pack_uastc_flags(self.0, flags);
}
}
/// Use the global codebook to compress the image. slightly smaller files, but lower quality,
/// slower encoding
pub fn set_use_global_codebook(
&mut self,
use_global_codebook: bool,
) {
unsafe {
sys::compressor_params_set_global_sel_pal(self.0, use_global_codebook);
}
}
/// Automatically use virtual selector palettes on small images for slightly smaller files
/// (defaults to off for faster encoding time)
pub fn set_auto_use_global_codebook(
&mut self,
auto_use_global_codebook: bool,
) {
unsafe {
sys::compressor_params_set_auto_global_sel_pal(self.0, auto_use_global_codebook);
}
}
/// Set the basis format we will compress to. See basis documentation for details. This
/// corresponds to the -uastc flag in the basisu command line tool and the m_uastc boolean param
/// on `basis_compressor_params` in the original library
///
/// UASTC encoding result in significantly higher texture quality, but larger files.
pub fn set_basis_format(
&mut self,
basis_format: BasisTextureFormat,
) {
let is_uastc = match basis_format {
BasisTextureFormat::ETC1S => false,
BasisTextureFormat::UASTC4x4 => true,
};
unsafe {
sys::compressor_params_set_uastc(self.0, is_uastc);
}
}
/// Sets the color space the images to be compressed is encoded in
///
/// Setting a linear color space will:
/// * Use linear colorspace metrics (instead of the default sRGB)
/// * By default use linear (not sRGB) mipmap filtering
pub fn set_color_space(
&mut self,
color_space: ColorSpace,
) {
let perceptual = match color_space {
ColorSpace::Linear => false,
ColorSpace::Srgb => true,
};
unsafe {
sys::compressor_params_set_perceptual(self.0, perceptual);
}
}
/// Override the mipmap generation color space behavior. This function is not necessary to call
/// if you call [set_color_space] with the correct value.
///
/// * If the color space is sRGB, convert image to linear before filtering, then back to sRGB
/// * If the color space is linear, we keep the image in linear during mipmap filtering
/// (i.e. do not convert to/from sRGB for filtering purposes)
pub fn set_mip_color_space(
&mut self,
color_space: ColorSpace,
) {
let mip_srgb = match color_space {
ColorSpace::Linear => false,
ColorSpace::Srgb => true,
};
unsafe {
sys::compressor_params_set_mip_srgb(self.0, mip_srgb);
}
}
/// Disable backend's selector rate distortion optimizations (slightly faster, less noisy
/// output, but lower quality per output bit)
pub fn set_no_selector_rdo(
&mut self,
no_selector_rdo: bool,
) {
unsafe {
sys::compressor_params_set_no_selector_rdo(self.0, no_selector_rdo);
}
}
/// Disable backend's endpoint rate distortion optimizations (slightly faster, less noisy
/// output, but lower quality per output bit)
pub fn set_no_endpoint_rdo(
&mut self,
no_endpoint_rdo: bool,
) {
unsafe {
sys::compressor_params_set_no_endpoint_rdo(self.0, no_endpoint_rdo);
}
}
/// Enable/disable UASTC RDO post-processing and set UASTC RDO quality scalar to X. Lower
/// values=higher quality/larger LZ compressed files, higher values=lower quality/smaller LZ
/// compressed files. Good range to try is [.2-4]
pub fn set_rdo_uastc(
&mut self,
rdo_uastc_quality_scalar: Option<f32>,
) {
unsafe {
match rdo_uastc_quality_scalar {
Some(quality_scalar) => {
sys::compressor_params_set_rdo_uastc(self.0, true);
sys::compressor_params_set_rdo_uastc_quality_scalar(self.0, quality_scalar);
}
None => {
sys::compressor_params_set_rdo_uastc(self.0, false);
}
}
}
}
/// Generate mipmaps for each source image
///
/// By default, sRGB textures will be converted from sRGB to linear before mipmap filtering.
/// This can be changed by calling [set_color_space] or [set_mip_color_space]
pub fn set_generate_mipmaps(
&mut self,
generate_mipmaps: bool,
) {
unsafe {
sys::compressor_params_set_generate_mipmaps(self.0, generate_mipmaps);
}
}
/// Sets the smallest dimension mipmap that will be generated
pub fn set_mipmap_smallest_dimension(
&mut self,
smallest_dimension: u32,
) {
unsafe {
sys::compressor_params_set_mip_smallest_dimension(self.0, smallest_dimension as _);
}
}
/// Set arbitrary userdata to be included with the basis-universal binary data
pub fn set_userdata(
| print_status_to_stdout: bool,
) {
unsafe { sys::compressor_params_set_status_output(self.0, print_status_to_stdout) } | random_line_split |
manual_map.rs | scrutinee, then_pat, then_body, else_pat, else_body) = match IfLetOrMatch::parse(cx, expr) {
Some(IfLetOrMatch::IfLet(scrutinee, pat, body, Some(r#else))) => (scrutinee, pat, body, None, r#else),
Some(IfLetOrMatch::Match(
scrutinee,
[arm1 @ Arm { guard: None, .. }, arm2 @ Arm { guard: None, .. }],
_,
)) => (scrutinee, arm1.pat, arm1.body, Some(arm2.pat), arm2.body),
_ => return,
};
if in_external_macro(cx.sess(), expr.span) || in_constant(cx, expr.hir_id) {
return;
}
let (scrutinee_ty, ty_ref_count, ty_mutability) =
peel_mid_ty_refs_is_mutable(cx.typeck_results().expr_ty(scrutinee));
if !(is_type_diagnostic_item(cx, scrutinee_ty, sym::Option)
&& is_type_diagnostic_item(cx, cx.typeck_results().expr_ty(expr), sym::Option))
{
return;
}
let expr_ctxt = expr.span.ctxt();
let (some_expr, some_pat, pat_ref_count, is_wild_none) = match (
try_parse_pattern(cx, then_pat, expr_ctxt),
else_pat.map_or(Some(OptionPat::Wild), |p| try_parse_pattern(cx, p, expr_ctxt)),
) {
(Some(OptionPat::Wild), Some(OptionPat::Some { pattern, ref_count })) if is_none_expr(cx, then_body) => {
(else_body, pattern, ref_count, true)
},
(Some(OptionPat::None), Some(OptionPat::Some { pattern, ref_count })) if is_none_expr(cx, then_body) => {
(else_body, pattern, ref_count, false)
},
(Some(OptionPat::Some { pattern, ref_count }), Some(OptionPat::Wild)) if is_none_expr(cx, else_body) => {
(then_body, pattern, ref_count, true)
},
(Some(OptionPat::Some { pattern, ref_count }), Some(OptionPat::None)) if is_none_expr(cx, else_body) => {
(then_body, pattern, ref_count, false)
},
_ => return,
};
// Top level or patterns aren't allowed in closures.
if matches!(some_pat.kind, PatKind::Or(_)) {
return;
}
let some_expr = match get_some_expr(cx, some_expr, expr_ctxt) {
Some(expr) => expr,
None => return,
};
// These two lints will go back and forth with each other.
if cx.typeck_results().expr_ty(some_expr) == cx.tcx.types.unit
&& !is_lint_allowed(cx, OPTION_MAP_UNIT_FN, expr.hir_id)
{
return;
}
// `map` won't perform any adjustments.
if !cx.typeck_results().expr_adjustments(some_expr).is_empty() {
return;
}
// Determine which binding mode to use.
let explicit_ref = some_pat.contains_explicit_ref_binding();
let binding_ref = explicit_ref.or_else(|| (ty_ref_count != pat_ref_count).then(|| ty_mutability));
let as_ref_str = match binding_ref {
Some(Mutability::Mut) => ".as_mut()",
Some(Mutability::Not) => ".as_ref()",
None => "",
};
match can_move_expr_to_closure(cx, some_expr) {
Some(captures) => {
// Check if captures the closure will need conflict with borrows made in the scrutinee.
// TODO: check all the references made in the scrutinee expression. This will require interacting
// with the borrow checker. Currently only `<local>[.<field>]*` is checked for.
if let Some(binding_ref_mutability) = binding_ref {
let e = peel_hir_expr_while(scrutinee, |e| match e.kind {
ExprKind::Field(e, _) | ExprKind::AddrOf(_, _, e) => Some(e),
_ => None,
});
if let ExprKind::Path(QPath::Resolved(None, Path { res: Res::Local(l), .. })) = e.kind {
match captures.get(l) {
Some(CaptureKind::Value | CaptureKind::Ref(Mutability::Mut)) => return,
Some(CaptureKind::Ref(Mutability::Not)) if binding_ref_mutability == Mutability::Mut => {
return;
},
Some(CaptureKind::Ref(Mutability::Not)) | None => (),
}
}
}
},
None => return,
};
let mut app = Applicability::MachineApplicable;
// Remove address-of expressions from the scrutinee. Either `as_ref` will be called, or
// it's being passed by value.
let scrutinee = peel_hir_expr_refs(scrutinee).0;
let (scrutinee_str, _) = snippet_with_context(cx, scrutinee.span, expr_ctxt, "..", &mut app);
let scrutinee_str =
if scrutinee.span.ctxt() == expr.span.ctxt() && scrutinee.precedence().order() < PREC_POSTFIX {
format!("({})", scrutinee_str)
} else {
scrutinee_str.into()
};
let body_str = if let PatKind::Binding(annotation, id, some_binding, None) = some_pat.kind {
match can_pass_as_func(cx, id, some_expr) {
Some(func) if func.span.ctxt() == some_expr.span.ctxt() => {
snippet_with_applicability(cx, func.span, "..", &mut app).into_owned()
},
_ => {
if path_to_local_id(some_expr, id)
&& !is_lint_allowed(cx, MATCH_AS_REF, expr.hir_id)
&& binding_ref.is_some()
{
return;
}
// `ref` and `ref mut` annotations were handled earlier.
let annotation = if matches!(annotation, BindingAnnotation::Mutable) {
"mut "
} else {
""
};
format!(
"|{}{}| {}",
annotation,
some_binding,
snippet_with_context(cx, some_expr.span, expr_ctxt, "..", &mut app).0
)
},
}
} else if !is_wild_none && explicit_ref.is_none() {
// TODO: handle explicit reference annotations.
format!(
"|{}| {}",
snippet_with_context(cx, some_pat.span, expr_ctxt, "..", &mut app).0,
snippet_with_context(cx, some_expr.span, expr_ctxt, "..", &mut app).0
)
} else {
// Refutable bindings and mixed reference annotations can't be handled by `map`.
return;
};
span_lint_and_sugg(
cx,
MANUAL_MAP,
expr.span,
"manual implementation of `Option::map`",
"try this",
if else_pat.is_none() && is_else_clause(cx.tcx, expr) {
format!("{{ {}{}.map({}) }}", scrutinee_str, as_ref_str, body_str)
} else {
format!("{}{}.map({})", scrutinee_str, as_ref_str, body_str)
},
app,
);
}
}
// Checks whether the expression could be passed as a function, or whether a closure is needed.
// Returns the function to be passed to `map` if it exists.
fn can_pass_as_func(cx: &LateContext<'tcx>, binding: HirId, expr: &'tcx Expr<'_>) -> Option<&'tcx Expr<'tcx>> {
match expr.kind {
ExprKind::Call(func, [arg])
if path_to_local_id(arg, binding) && cx.typeck_results().expr_adjustments(arg).is_empty() =>
{
Some(func)
},
_ => None,
}
}
enum OptionPat<'a> {
Wild,
None,
Some {
// The pattern contained in the `Some` tuple.
pattern: &'a Pat<'a>,
// The number of references before the `Some` tuple.
// e.g. `&&Some(_)` has a ref count of 2.
ref_count: usize,
},
}
// Try to parse into a recognized `Option` pattern.
// i.e. `_`, `None`, `Some(..)`, or a reference to any of those.
fn try_parse_pattern(cx: &LateContext<'tcx>, pat: &'tcx Pat<'_>, ctxt: SyntaxContext) -> Option<OptionPat<'tcx>> {
fn f(cx: &LateContext<'tcx>, pat: &'tcx Pat<'_>, ref_count: usize, ctxt: SyntaxContext) -> Option<OptionPat<'tcx>> | {
match pat.kind {
PatKind::Wild => Some(OptionPat::Wild),
PatKind::Ref(pat, _) => f(cx, pat, ref_count + 1, ctxt),
PatKind::Path(ref qpath) if is_lang_ctor(cx, qpath, OptionNone) => Some(OptionPat::None),
PatKind::TupleStruct(ref qpath, [pattern], _)
if is_lang_ctor(cx, qpath, OptionSome) && pat.span.ctxt() == ctxt =>
{
Some(OptionPat::Some { pattern, ref_count })
},
_ => None,
}
} | identifier_body |
|
manual_map.rs | ///
/// ### Example
/// ```rust
/// match Some(0) {
/// Some(x) => Some(x + 1),
/// None => None,
/// };
/// ```
/// Use instead:
/// ```rust
/// Some(0).map(|x| x + 1);
/// ```
#[clippy::version = "1.52.0"]
pub MANUAL_MAP,
style,
"reimplementation of `map`"
}
declare_lint_pass!(ManualMap => [MANUAL_MAP]);
impl LateLintPass<'_> for ManualMap {
#[allow(clippy::too_many_lines)]
fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) {
let (scrutinee, then_pat, then_body, else_pat, else_body) = match IfLetOrMatch::parse(cx, expr) {
Some(IfLetOrMatch::IfLet(scrutinee, pat, body, Some(r#else))) => (scrutinee, pat, body, None, r#else),
Some(IfLetOrMatch::Match(
scrutinee,
[arm1 @ Arm { guard: None, .. }, arm2 @ Arm { guard: None, .. }],
_,
)) => (scrutinee, arm1.pat, arm1.body, Some(arm2.pat), arm2.body),
_ => return,
};
if in_external_macro(cx.sess(), expr.span) || in_constant(cx, expr.hir_id) {
return;
}
let (scrutinee_ty, ty_ref_count, ty_mutability) =
peel_mid_ty_refs_is_mutable(cx.typeck_results().expr_ty(scrutinee));
if !(is_type_diagnostic_item(cx, scrutinee_ty, sym::Option)
&& is_type_diagnostic_item(cx, cx.typeck_results().expr_ty(expr), sym::Option))
{
return;
}
let expr_ctxt = expr.span.ctxt();
let (some_expr, some_pat, pat_ref_count, is_wild_none) = match (
try_parse_pattern(cx, then_pat, expr_ctxt),
else_pat.map_or(Some(OptionPat::Wild), |p| try_parse_pattern(cx, p, expr_ctxt)),
) {
(Some(OptionPat::Wild), Some(OptionPat::Some { pattern, ref_count })) if is_none_expr(cx, then_body) => {
(else_body, pattern, ref_count, true)
},
(Some(OptionPat::None), Some(OptionPat::Some { pattern, ref_count })) if is_none_expr(cx, then_body) => {
(else_body, pattern, ref_count, false)
},
(Some(OptionPat::Some { pattern, ref_count }), Some(OptionPat::Wild)) if is_none_expr(cx, else_body) => {
(then_body, pattern, ref_count, true)
},
(Some(OptionPat::Some { pattern, ref_count }), Some(OptionPat::None)) if is_none_expr(cx, else_body) => {
(then_body, pattern, ref_count, false)
},
_ => return,
};
// Top level or patterns aren't allowed in closures.
if matches!(some_pat.kind, PatKind::Or(_)) {
return;
}
let some_expr = match get_some_expr(cx, some_expr, expr_ctxt) {
Some(expr) => expr,
None => return,
};
// These two lints will go back and forth with each other.
if cx.typeck_results().expr_ty(some_expr) == cx.tcx.types.unit
&& !is_lint_allowed(cx, OPTION_MAP_UNIT_FN, expr.hir_id)
{
return;
}
// `map` won't perform any adjustments.
if !cx.typeck_results().expr_adjustments(some_expr).is_empty() {
return;
}
// Determine which binding mode to use.
let explicit_ref = some_pat.contains_explicit_ref_binding();
let binding_ref = explicit_ref.or_else(|| (ty_ref_count != pat_ref_count).then(|| ty_mutability));
let as_ref_str = match binding_ref {
Some(Mutability::Mut) => ".as_mut()",
Some(Mutability::Not) => ".as_ref()",
None => "",
};
match can_move_expr_to_closure(cx, some_expr) {
Some(captures) => {
// Check if captures the closure will need conflict with borrows made in the scrutinee.
// TODO: check all the references made in the scrutinee expression. This will require interacting
// with the borrow checker. Currently only `<local>[.<field>]*` is checked for.
if let Some(binding_ref_mutability) = binding_ref {
let e = peel_hir_expr_while(scrutinee, |e| match e.kind {
ExprKind::Field(e, _) | ExprKind::AddrOf(_, _, e) => Some(e),
_ => None,
});
if let ExprKind::Path(QPath::Resolved(None, Path { res: Res::Local(l), .. })) = e.kind {
match captures.get(l) {
Some(CaptureKind::Value | CaptureKind::Ref(Mutability::Mut)) => return,
Some(CaptureKind::Ref(Mutability::Not)) if binding_ref_mutability == Mutability::Mut => {
return;
},
Some(CaptureKind::Ref(Mutability::Not)) | None => (),
}
}
}
},
None => return,
};
let mut app = Applicability::MachineApplicable;
// Remove address-of expressions from the scrutinee. Either `as_ref` will be called, or
// it's being passed by value.
let scrutinee = peel_hir_expr_refs(scrutinee).0;
let (scrutinee_str, _) = snippet_with_context(cx, scrutinee.span, expr_ctxt, "..", &mut app);
let scrutinee_str =
if scrutinee.span.ctxt() == expr.span.ctxt() && scrutinee.precedence().order() < PREC_POSTFIX {
format!("({})", scrutinee_str)
} else {
scrutinee_str.into()
};
let body_str = if let PatKind::Binding(annotation, id, some_binding, None) = some_pat.kind {
match can_pass_as_func(cx, id, some_expr) {
Some(func) if func.span.ctxt() == some_expr.span.ctxt() => {
snippet_with_applicability(cx, func.span, "..", &mut app).into_owned()
},
_ => {
if path_to_local_id(some_expr, id)
&& !is_lint_allowed(cx, MATCH_AS_REF, expr.hir_id)
&& binding_ref.is_some()
{
return;
}
// `ref` and `ref mut` annotations were handled earlier.
let annotation = if matches!(annotation, BindingAnnotation::Mutable) {
"mut "
} else {
""
};
format!(
"|{}{}| {}",
annotation,
some_binding,
snippet_with_context(cx, some_expr.span, expr_ctxt, "..", &mut app).0
)
},
}
} else if !is_wild_none && explicit_ref.is_none() {
// TODO: handle explicit reference annotations.
format!(
"|{}| {}",
snippet_with_context(cx, some_pat.span, expr_ctxt, "..", &mut app).0,
snippet_with_context(cx, some_expr.span, expr_ctxt, "..", &mut app).0
)
} else {
// Refutable bindings and mixed reference annotations can't be handled by `map`.
return;
};
span_lint_and_sugg(
cx,
MANUAL_MAP,
expr.span,
"manual implementation of `Option::map`",
"try this",
if else_pat.is_none() && is_else_clause(cx.tcx, expr) {
format!("{{ {}{}.map({}) }}", scrutinee_str, as_ref_str, body_str)
} else {
format!("{}{}.map({})", scrutinee_str, as_ref_str, body_str)
},
app,
);
}
}
// Checks whether the expression could be passed as a function, or whether a closure is needed.
// Returns the function to be passed to `map` if it exists.
fn can_pass_as_func(cx: &LateContext<'tcx>, binding: HirId, expr: &'tcx Expr<'_>) -> Option<&'tcx Expr<'tcx>> {
match expr.kind {
ExprKind::Call(func, [arg])
if path_to_local_id(arg, binding) && cx.typeck_results().expr_adjustments(arg).is_empty() =>
{
Some(func)
},
_ => None,
}
}
enum OptionPat<'a> {
Wild,
None,
Some {
// The pattern contained in the `Some` tuple.
pattern: &'a Pat<'a>,
// The number of references before the `Some` tuple.
// e.g. `&&Some(_)` has a ref count of 2.
ref_count: usize,
},
}
// Try to parse into a recognized `Option` pattern.
// i.e. `_`, `None`, `Some(..)`, or a reference to any of those.
fn try_parse_pattern(cx: &LateContext<'tcx>, pat: &'tcx Pat<'_>, ctxt: SyntaxContext) -> Option<OptionPat<'tcx>> {
fn | f | identifier_name |
|
manual_map.rs | use rustc_hir::{
def::Res, Arm, BindingAnnotation, Block, Expr, ExprKind, HirId, Mutability, Pat, PatKind, Path, QPath,
};
use rustc_lint::{LateContext, LateLintPass, LintContext};
use rustc_middle::lint::in_external_macro;
use rustc_session::{declare_lint_pass, declare_tool_lint};
use rustc_span::{sym, SyntaxContext};
declare_clippy_lint! {
/// ### What it does
/// Checks for usages of `match` which could be implemented using `map`
///
/// ### Why is this bad?
/// Using the `map` method is clearer and more concise.
///
/// ### Example
/// ```rust
/// match Some(0) {
/// Some(x) => Some(x + 1),
/// None => None,
/// };
/// ```
/// Use instead:
/// ```rust
/// Some(0).map(|x| x + 1);
/// ```
#[clippy::version = "1.52.0"]
pub MANUAL_MAP,
style,
"reimplementation of `map`"
}
declare_lint_pass!(ManualMap => [MANUAL_MAP]);
impl LateLintPass<'_> for ManualMap {
#[allow(clippy::too_many_lines)]
fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) {
let (scrutinee, then_pat, then_body, else_pat, else_body) = match IfLetOrMatch::parse(cx, expr) {
Some(IfLetOrMatch::IfLet(scrutinee, pat, body, Some(r#else))) => (scrutinee, pat, body, None, r#else),
Some(IfLetOrMatch::Match(
scrutinee,
[arm1 @ Arm { guard: None, .. }, arm2 @ Arm { guard: None, .. }],
_,
)) => (scrutinee, arm1.pat, arm1.body, Some(arm2.pat), arm2.body),
_ => return,
};
if in_external_macro(cx.sess(), expr.span) || in_constant(cx, expr.hir_id) {
return;
}
let (scrutinee_ty, ty_ref_count, ty_mutability) =
peel_mid_ty_refs_is_mutable(cx.typeck_results().expr_ty(scrutinee));
if !(is_type_diagnostic_item(cx, scrutinee_ty, sym::Option)
&& is_type_diagnostic_item(cx, cx.typeck_results().expr_ty(expr), sym::Option))
{
return;
}
let expr_ctxt = expr.span.ctxt();
let (some_expr, some_pat, pat_ref_count, is_wild_none) = match (
try_parse_pattern(cx, then_pat, expr_ctxt),
else_pat.map_or(Some(OptionPat::Wild), |p| try_parse_pattern(cx, p, expr_ctxt)),
) {
(Some(OptionPat::Wild), Some(OptionPat::Some { pattern, ref_count })) if is_none_expr(cx, then_body) => {
(else_body, pattern, ref_count, true)
},
(Some(OptionPat::None), Some(OptionPat::Some { pattern, ref_count })) if is_none_expr(cx, then_body) => {
(else_body, pattern, ref_count, false)
},
(Some(OptionPat::Some { pattern, ref_count }), Some(OptionPat::Wild)) if is_none_expr(cx, else_body) => {
(then_body, pattern, ref_count, true)
},
(Some(OptionPat::Some { pattern, ref_count }), Some(OptionPat::None)) if is_none_expr(cx, else_body) => {
(then_body, pattern, ref_count, false)
},
_ => return,
};
// Top level or patterns aren't allowed in closures.
if matches!(some_pat.kind, PatKind::Or(_)) {
return;
}
let some_expr = match get_some_expr(cx, some_expr, expr_ctxt) {
Some(expr) => expr,
None => return,
};
// These two lints will go back and forth with each other.
if cx.typeck_results().expr_ty(some_expr) == cx.tcx.types.unit
&& !is_lint_allowed(cx, OPTION_MAP_UNIT_FN, expr.hir_id)
{
return;
}
// `map` won't perform any adjustments.
if !cx.typeck_results().expr_adjustments(some_expr).is_empty() {
return;
}
// Determine which binding mode to use.
let explicit_ref = some_pat.contains_explicit_ref_binding();
let binding_ref = explicit_ref.or_else(|| (ty_ref_count != pat_ref_count).then(|| ty_mutability));
let as_ref_str = match binding_ref {
Some(Mutability::Mut) => ".as_mut()",
Some(Mutability::Not) => ".as_ref()",
None => "",
};
match can_move_expr_to_closure(cx, some_expr) {
Some(captures) => {
// Check if captures the closure will need conflict with borrows made in the scrutinee.
// TODO: check all the references made in the scrutinee expression. This will require interacting
// with the borrow checker. Currently only `<local>[.<field>]*` is checked for.
if let Some(binding_ref_mutability) = binding_ref {
let e = peel_hir_expr_while(scrutinee, |e| match e.kind {
ExprKind::Field(e, _) | ExprKind::AddrOf(_, _, e) => Some(e),
_ => None,
});
if let ExprKind::Path(QPath::Resolved(None, Path { res: Res::Local(l), .. })) = e.kind {
match captures.get(l) {
Some(CaptureKind::Value | CaptureKind::Ref(Mutability::Mut)) => return,
Some(CaptureKind::Ref(Mutability::Not)) if binding_ref_mutability == Mutability::Mut => {
return;
},
Some(CaptureKind::Ref(Mutability::Not)) | None => (),
}
}
}
},
None => return,
};
let mut app = Applicability::MachineApplicable;
// Remove address-of expressions from the scrutinee. Either `as_ref` will be called, or
// it's being passed by value.
let scrutinee = peel_hir_expr_refs(scrutinee).0;
let (scrutinee_str, _) = snippet_with_context(cx, scrutinee.span, expr_ctxt, "..", &mut app);
let scrutinee_str =
if scrutinee.span.ctxt() == expr.span.ctxt() && scrutinee.precedence().order() < PREC_POSTFIX {
format!("({})", scrutinee_str)
} else {
scrutinee_str.into()
};
let body_str = if let PatKind::Binding(annotation, id, some_binding, None) = some_pat.kind {
match can_pass_as_func(cx, id, some_expr) {
Some(func) if func.span.ctxt() == some_expr.span.ctxt() => {
snippet_with_applicability(cx, func.span, "..", &mut app).into_owned()
},
_ => {
if path_to_local_id(some_expr, id)
&& !is_lint_allowed(cx, MATCH_AS_REF, expr.hir_id)
&& binding_ref.is_some()
{
return;
}
// `ref` and `ref mut` annotations were handled earlier.
let annotation = if matches!(annotation, BindingAnnotation::Mutable) {
"mut "
} else {
""
};
format!(
"|{}{}| {}",
annotation,
some_binding,
snippet_with_context(cx, some_expr.span, expr_ctxt, "..", &mut app).0
)
},
}
} else if !is_wild_none && explicit_ref.is_none() {
// TODO: handle explicit reference annotations.
format!(
"|{}| {}",
snippet_with_context(cx, some_pat.span, expr_ctxt, "..", &mut app).0,
snippet_with_context(cx, some_expr.span, expr_ctxt, "..", &mut app).0
)
} else {
// Refutable bindings and mixed reference annotations can't be handled by `map`.
return;
};
span_lint_and_sugg(
cx,
MANUAL_MAP,
expr.span,
"manual implementation of `Option::map`",
"try this",
if else_pat.is_none() && is_else_clause(cx.tcx, expr) {
format!("{{ {}{}.map({}) }}", scrutinee_str, as_ref_str, body_str)
} else {
format!("{}{}.map({})", scrutinee_str, as_ref_str, body_str)
},
app,
);
}
}
// Checks whether the expression could be passed as a function, or whether a closure is needed.
// Returns the function to be passed to `map` if it exists.
fn can_pass_as_func(cx: &LateContext<'tcx>, binding: HirId, expr: &'tcx Expr<'_>) -> Option<&'tcx Expr<'tcx>> {
match expr.kind {
ExprKind::Call(func, [arg])
if path_to | };
use rustc_ast::util::parser::PREC_POSTFIX;
use rustc_errors::Applicability;
use rustc_hir::LangItem::{OptionNone, OptionSome}; | random_line_split |
|
util.rs | !("error seeking to offset {} in input file {:?}", offset, path))?;
}
let r = BufReader::new(f);
if let FileRange::Range {len, ..} = range {
Ok(Box::new(r.take(len as u64)))
} else {
Ok(Box::new(r))
}
}
///
/// Execute a command, pipe the contents of a file to stdin, return the output as a `String`
///
pub fn exec_cmdline_pipe_input<S,P>(cmd_path: &str, args: S, input: P, range: FileRange) -> Result<String>
where S: AsRef<str>, P: AsRef<Path>
{
let mut r = ranged_reader(input.as_ref(), range)?;
ensure_command_exists(cmd_path)?;
let args: Vec<&str> = args.as_ref().split_whitespace().collect::<Vec<_>>();
let mut child = Command::new(cmd_path)
.args(args)
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::inherit())
.spawn()
.map_err(context!("unable to execute {}", cmd_path))?;
let stdin = child.stdin.as_mut().unwrap();
io::copy(&mut r, stdin)
.map_err(context!("error copying input to stdin"))?;
let output = child.wait_with_output()
.map_err(context!("error waiting for command {} to exit", cmd_path))?;
Ok(String::from_utf8(output.stdout).unwrap().trim().to_owned())
}
pub fn xz_compress<P: AsRef<Path>>(path: P) -> Result<()> {
let path = path.as_ref();
cmd!("/usr/bin/xz", "-T0 {}", path.display())
.map_err(context!("failed to compress {:?}", path))
}
pub fn xz_decompress<P: AsRef<Path>>(path: P) -> Result<()> {
let path = path.as_ref();
cmd!("/usr/bin/xz", "-d {}", path.display())
.map_err(context!("failed to decompress {:?}", path))
}
pub fn mount<P: AsRef<Path>>(source: impl AsRef<str>, target: P, options: Option<&str>) -> Result<()> {
let source = source.as_ref();
let target = target.as_ref();
if let Some(options) = options {
cmd!("/usr/bin/mount", "{} {} {}", options, source, target.display())
} else {
cmd!("/usr/bin/mount", "{} {}", source, target.display())
}.map_err(context!("failed to mount {} to {:?}", source, target))
}
pub fn umount<P: AsRef<Path>>(path: P) -> Result<()> {
let path = path.as_ref();
cmd!("/usr/bin/umount", "{}", path.display())
.map_err(context!("failed to unmount {:?}", path))
}
pub fn chown_user<P: AsRef<Path>>(path: P) -> Result<()> {
chown(path.as_ref(), 1000, 1000)
}
pub fn chown(path: &Path, uid: u32, gid: u32) -> Result<()> {
let cstr = CString::new(path.as_os_str().as_bytes())
.expect("path contains null byte");
unsafe {
if libc::chown(cstr.as_ptr(), uid, gid) == -1 {
let err = io::Error::last_os_error();
bail!("failed to chown({},{}) {:?}: {}", uid, gid, path, err);
}
}
Ok(())
}
pub fn chmod(path: &Path, mode: u32) -> Result<()> {
let meta = path.metadata()
.map_err(context!("Failed to read metadata from path {:?}", path))?;
meta.permissions().set_mode(mode);
Ok(())
}
/// Rename or move file at `from` to file path `to`
///
/// A wrapper around `fs::rename()` which on failure returns an error indicating the source and
/// destination paths.
///
pub fn rename(from: impl AsRef<Path>, to: impl AsRef<Path>) -> Result<()> {
let from = from.as_ref();
let to = to.as_ref();
fs::rename(from, to)
.map_err(context!("error renaming {:?} to {:?}", from, to))
}
/// Create a symlink at path `dst` which points to `src`
///
/// A wrapper around `fs::symlink()` which on failure returns an error indicating the source and
/// destination paths.
///
pub fn symlink(src: impl AsRef<Path>, dst: impl AsRef<Path>) -> Result<()> {
let src = src.as_ref();
let dst = dst.as_ref();
unixfs::symlink(src, dst)
.map_err(context!("failed to create symlink {:?} to {:?}", dst, src))
}
/// Read directory `dir` and call closure `f` on each `DirEntry`
pub fn read_directory<F>(dir: impl AsRef<Path>, mut f: F) -> Result<()>
where
F: FnMut(&DirEntry) -> Result<()>
{
let dir = dir.as_ref();
let entries = fs::read_dir(dir)
.map_err(context!("failed to read directory {:?}", dir))?;
for dent in entries {
let dent = dent.map_err(context!("error reading entry from directory {:?}", dir))?;
f(&dent)?;
}
Ok(())
}
/// Remove file at `path` if it exists.
///
/// A wrapper around `fs::remove_file()` which on failure returns an error indicating the path of
/// the file which failed to be removed.
///
pub fn remove_file(path: impl AsRef<Path>) -> Result<()> {
let path = path.as_ref();
if path.exists() {
fs::remove_file(path)
.map_err(context!("failed to remove file {:?}", path))?;
}
Ok(())
}
/// Create directory `path` if it does not already exist.
///
/// A wrapper around `fs::create_dir_all()` which on failure returns an error indicating the path
/// of the directory which failed to be created.
///
pub fn create_dir(path: impl AsRef<Path>) -> Result<()> {
let path = path.as_ref();
if !path.exists() {
fs::create_dir_all(path)
.map_err(context!("failed to create directory {:?}", path))?;
}
Ok(())
}
/// Write `contents` to file `path`
///
/// A wrapper around `fs::write()` which on failure returns an error indicating the path
/// of the file which failed to be written.
///
pub fn write_file(path: impl AsRef<Path>, contents: impl AsRef<[u8]>) -> Result<()> {
let path = path.as_ref();
fs::write(path, contents)
.map_err(context!("failed to write to file {:?}", path))
}
/// Read content of file `path` into a `String`
///
/// A wrapper around `fs::read_to_string()` which on failure returns an error indicating the path
/// of the file which failed to be read.
///
pub fn read_to_string(path: impl AsRef<Path>) -> Result<String> {
let path = path.as_ref();
fs::read_to_string(path)
.map_err(context!("failed to read file {:?}", path))
}
/// Copy file at path `from` to a new file at path `to`
///
/// A wrapper around `fs::copy()` which on failure returns an error indicating the source and
/// destination paths.
///
pub fn copy_file(from: impl AsRef<Path>, to: impl AsRef<Path>) -> Result<()> {
let from = from.as_ref();
let to = to.as_ref();
fs::copy(from, to)
.map_err(context!("failed to copy file {:?} to {:?}", from, to))?;
Ok(())
}
fn copy_path(from: &Path, to: &Path, chown_to: Option<(u32,u32)>) -> Result<()> {
if to.exists() {
bail!("destination path {} already exists which is not expected", to.display());
}
let meta = from.metadata()
.map_err(context!("failed to read metadata from source file {:?}", from))?;
if from.is_dir() {
util::create_dir(to)?;
} else {
util::copy_file(&from, &to)?;
}
if let Some((uid,gid)) = chown_to {
chown(to, uid, gid)?;
} else {
chown(to, meta.uid(), meta.gid())?;
}
Ok(())
}
pub fn copy_tree(from_base: &Path, to_base: &Path) -> Result<()> {
_copy_tree(from_base, to_base, None)
}
pub fn copy_tree_with_chown(from_base: &Path, to_base: &Path, chown_to: (u32,u32)) -> Result<()> {
_copy_tree(from_base, to_base, Some(chown_to))
}
fn _copy_tree(from_base: &Path, to_base: &Path, chown_to: Option<(u32,u32)>) -> Result<()> {
for entry in WalkDir::new(from_base) {
let entry = entry.map_err(|e| format_err!("Error walking directory tree: {}", e))?;
let path = entry.path();
let suffix = path.strip_prefix(from_base)
.map_err(|_| format_err!("Failed to strip prefix from {:?}", path))?;
let to = to_base.join(suffix);
if &to != to_base | {
copy_path(path, &to, chown_to)
.map_err(context!("failed to copy {:?} to {:?}", path, to))?;
} | conditional_block |
|
util.rs | let Some(c) = s.chars().next() {
return is_ascii(c) && c.is_alphabetic()
}
false
}
fn search_path(filename: &str) -> Result<PathBuf> {
let path_var = env::var("PATH").unwrap_or("".into());
for mut path in env::split_paths(&path_var) {
path.push(filename);
if path.exists() {
return Ok(path);
}
}
bail!("could not find {} in $PATH", filename)
}
pub fn ensure_command_exists(cmd: &str) -> Result<()> {
let path = Path::new(cmd);
if !path.is_absolute() {
search_path(cmd)?;
return Ok(())
} else if path.exists() {
return Ok(())
}
bail!("cannot execute '{}': command does not exist", cmd)
}
pub fn sha256<P: AsRef<Path>>(path: P) -> Result<String> {
let path = path.as_ref();
let output = cmd_with_output!("/usr/bin/sha256sum", "{}", path.display())
.map_err(context!("failed to calculate sha256 on {:?}", path))?;
let v: Vec<&str> = output.split_whitespace().collect();
Ok(v[0].trim().to_owned())
}
#[derive(Copy,Clone)]
pub enum FileRange {
All,
Offset(usize),
Range{offset: usize, len: usize},
}
fn ranged_reader<P: AsRef<Path>>(path: P, range: FileRange) -> Result<Box<dyn Read>> {
let path = path.as_ref();
let mut f = File::open(path)
.map_err(context!("error opening input file {:?}", path))?;
let offset = match range {
FileRange::All => 0,
FileRange::Offset(n) => n,
FileRange::Range {offset, .. } => offset,
};
if offset > 0 {
f.seek(SeekFrom::Start(offset as u64))
.map_err(context!("error seeking to offset {} in input file {:?}", offset, path))?;
}
let r = BufReader::new(f);
if let FileRange::Range {len, ..} = range {
Ok(Box::new(r.take(len as u64)))
} else {
Ok(Box::new(r))
}
}
///
/// Execute a command, pipe the contents of a file to stdin, return the output as a `String`
///
pub fn exec_cmdline_pipe_input<S,P>(cmd_path: &str, args: S, input: P, range: FileRange) -> Result<String>
where S: AsRef<str>, P: AsRef<Path>
{
let mut r = ranged_reader(input.as_ref(), range)?;
ensure_command_exists(cmd_path)?;
let args: Vec<&str> = args.as_ref().split_whitespace().collect::<Vec<_>>();
let mut child = Command::new(cmd_path)
.args(args)
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::inherit())
.spawn()
.map_err(context!("unable to execute {}", cmd_path))?;
let stdin = child.stdin.as_mut().unwrap();
io::copy(&mut r, stdin)
.map_err(context!("error copying input to stdin"))?;
let output = child.wait_with_output()
.map_err(context!("error waiting for command {} to exit", cmd_path))?;
Ok(String::from_utf8(output.stdout).unwrap().trim().to_owned())
}
pub fn xz_compress<P: AsRef<Path>>(path: P) -> Result<()> {
let path = path.as_ref();
cmd!("/usr/bin/xz", "-T0 {}", path.display())
.map_err(context!("failed to compress {:?}", path))
}
pub fn | <P: AsRef<Path>>(path: P) -> Result<()> {
let path = path.as_ref();
cmd!("/usr/bin/xz", "-d {}", path.display())
.map_err(context!("failed to decompress {:?}", path))
}
pub fn mount<P: AsRef<Path>>(source: impl AsRef<str>, target: P, options: Option<&str>) -> Result<()> {
let source = source.as_ref();
let target = target.as_ref();
if let Some(options) = options {
cmd!("/usr/bin/mount", "{} {} {}", options, source, target.display())
} else {
cmd!("/usr/bin/mount", "{} {}", source, target.display())
}.map_err(context!("failed to mount {} to {:?}", source, target))
}
pub fn umount<P: AsRef<Path>>(path: P) -> Result<()> {
let path = path.as_ref();
cmd!("/usr/bin/umount", "{}", path.display())
.map_err(context!("failed to unmount {:?}", path))
}
pub fn chown_user<P: AsRef<Path>>(path: P) -> Result<()> {
chown(path.as_ref(), 1000, 1000)
}
pub fn chown(path: &Path, uid: u32, gid: u32) -> Result<()> {
let cstr = CString::new(path.as_os_str().as_bytes())
.expect("path contains null byte");
unsafe {
if libc::chown(cstr.as_ptr(), uid, gid) == -1 {
let err = io::Error::last_os_error();
bail!("failed to chown({},{}) {:?}: {}", uid, gid, path, err);
}
}
Ok(())
}
pub fn chmod(path: &Path, mode: u32) -> Result<()> {
let meta = path.metadata()
.map_err(context!("Failed to read metadata from path {:?}", path))?;
meta.permissions().set_mode(mode);
Ok(())
}
/// Rename or move file at `from` to file path `to`
///
/// A wrapper around `fs::rename()` which on failure returns an error indicating the source and
/// destination paths.
///
pub fn rename(from: impl AsRef<Path>, to: impl AsRef<Path>) -> Result<()> {
let from = from.as_ref();
let to = to.as_ref();
fs::rename(from, to)
.map_err(context!("error renaming {:?} to {:?}", from, to))
}
/// Create a symlink at path `dst` which points to `src`
///
/// A wrapper around `fs::symlink()` which on failure returns an error indicating the source and
/// destination paths.
///
pub fn symlink(src: impl AsRef<Path>, dst: impl AsRef<Path>) -> Result<()> {
let src = src.as_ref();
let dst = dst.as_ref();
unixfs::symlink(src, dst)
.map_err(context!("failed to create symlink {:?} to {:?}", dst, src))
}
/// Read directory `dir` and call closure `f` on each `DirEntry`
pub fn read_directory<F>(dir: impl AsRef<Path>, mut f: F) -> Result<()>
where
F: FnMut(&DirEntry) -> Result<()>
{
let dir = dir.as_ref();
let entries = fs::read_dir(dir)
.map_err(context!("failed to read directory {:?}", dir))?;
for dent in entries {
let dent = dent.map_err(context!("error reading entry from directory {:?}", dir))?;
f(&dent)?;
}
Ok(())
}
/// Remove file at `path` if it exists.
///
/// A wrapper around `fs::remove_file()` which on failure returns an error indicating the path of
/// the file which failed to be removed.
///
pub fn remove_file(path: impl AsRef<Path>) -> Result<()> {
let path = path.as_ref();
if path.exists() {
fs::remove_file(path)
.map_err(context!("failed to remove file {:?}", path))?;
}
Ok(())
}
/// Create directory `path` if it does not already exist.
///
/// A wrapper around `fs::create_dir_all()` which on failure returns an error indicating the path
/// of the directory which failed to be created.
///
pub fn create_dir(path: impl AsRef<Path>) -> Result<()> {
let path = path.as_ref();
if !path.exists() {
fs::create_dir_all(path)
.map_err(context!("failed to create directory {:?}", path))?;
}
Ok(())
}
/// Write `contents` to file `path`
///
/// A wrapper around `fs::write()` which on failure returns an error indicating the path
/// of the file which failed to be written.
///
pub fn write_file(path: impl AsRef<Path>, contents: impl AsRef<[u8]>) -> Result<()> {
let path = path.as_ref();
fs::write(path, contents)
.map_err(context!("failed to write to file {:?}", path))
}
/// Read content of file `path` into a `String`
///
/// A wrapper around `fs::read_to_string()` which on failure returns an error indicating the path
/// of the file which failed to be read.
///
pub fn read_to_string(path: impl AsRef<Path>) -> Result<String> {
let path = path.as_ref();
fs::read_to_string(path)
.map_err(context!("failed to read file {:?}", path))
}
/// Copy file at path `from` to a new file at path `to`
///
/// A wrapper around `fs::copy()` which on failure returns an error indicating the source and
/// destination paths.
///
pub fn copy_file(from: impl AsRef<Path>, to: impl AsRef<Path>) -> Result<()> {
let from = from.as_ref();
let to = | xz_decompress | identifier_name |
util.rs | ) -> Result<String> {
let path = path.as_ref();
let output = cmd_with_output!("/usr/bin/sha256sum", "{}", path.display())
.map_err(context!("failed to calculate sha256 on {:?}", path))?;
let v: Vec<&str> = output.split_whitespace().collect();
Ok(v[0].trim().to_owned())
}
#[derive(Copy,Clone)]
pub enum FileRange {
All,
Offset(usize),
Range{offset: usize, len: usize},
}
fn ranged_reader<P: AsRef<Path>>(path: P, range: FileRange) -> Result<Box<dyn Read>> {
let path = path.as_ref();
let mut f = File::open(path)
.map_err(context!("error opening input file {:?}", path))?;
let offset = match range {
FileRange::All => 0,
FileRange::Offset(n) => n,
FileRange::Range {offset, .. } => offset,
};
if offset > 0 {
f.seek(SeekFrom::Start(offset as u64))
.map_err(context!("error seeking to offset {} in input file {:?}", offset, path))?;
}
let r = BufReader::new(f);
if let FileRange::Range {len, ..} = range {
Ok(Box::new(r.take(len as u64)))
} else {
Ok(Box::new(r))
}
}
///
/// Execute a command, pipe the contents of a file to stdin, return the output as a `String`
///
pub fn exec_cmdline_pipe_input<S,P>(cmd_path: &str, args: S, input: P, range: FileRange) -> Result<String>
where S: AsRef<str>, P: AsRef<Path>
{
let mut r = ranged_reader(input.as_ref(), range)?;
ensure_command_exists(cmd_path)?;
let args: Vec<&str> = args.as_ref().split_whitespace().collect::<Vec<_>>();
let mut child = Command::new(cmd_path)
.args(args)
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::inherit())
.spawn()
.map_err(context!("unable to execute {}", cmd_path))?;
let stdin = child.stdin.as_mut().unwrap();
io::copy(&mut r, stdin)
.map_err(context!("error copying input to stdin"))?;
let output = child.wait_with_output()
.map_err(context!("error waiting for command {} to exit", cmd_path))?;
Ok(String::from_utf8(output.stdout).unwrap().trim().to_owned())
}
pub fn xz_compress<P: AsRef<Path>>(path: P) -> Result<()> {
let path = path.as_ref();
cmd!("/usr/bin/xz", "-T0 {}", path.display())
.map_err(context!("failed to compress {:?}", path))
}
pub fn xz_decompress<P: AsRef<Path>>(path: P) -> Result<()> {
let path = path.as_ref();
cmd!("/usr/bin/xz", "-d {}", path.display())
.map_err(context!("failed to decompress {:?}", path))
}
pub fn mount<P: AsRef<Path>>(source: impl AsRef<str>, target: P, options: Option<&str>) -> Result<()> {
let source = source.as_ref();
let target = target.as_ref();
if let Some(options) = options {
cmd!("/usr/bin/mount", "{} {} {}", options, source, target.display())
} else {
cmd!("/usr/bin/mount", "{} {}", source, target.display())
}.map_err(context!("failed to mount {} to {:?}", source, target))
}
pub fn umount<P: AsRef<Path>>(path: P) -> Result<()> {
let path = path.as_ref();
cmd!("/usr/bin/umount", "{}", path.display())
.map_err(context!("failed to unmount {:?}", path))
}
pub fn chown_user<P: AsRef<Path>>(path: P) -> Result<()> {
chown(path.as_ref(), 1000, 1000)
}
pub fn chown(path: &Path, uid: u32, gid: u32) -> Result<()> {
let cstr = CString::new(path.as_os_str().as_bytes())
.expect("path contains null byte");
unsafe {
if libc::chown(cstr.as_ptr(), uid, gid) == -1 {
let err = io::Error::last_os_error();
bail!("failed to chown({},{}) {:?}: {}", uid, gid, path, err);
}
}
Ok(())
}
pub fn chmod(path: &Path, mode: u32) -> Result<()> {
let meta = path.metadata()
.map_err(context!("Failed to read metadata from path {:?}", path))?;
meta.permissions().set_mode(mode);
Ok(())
}
/// Rename or move file at `from` to file path `to`
///
/// A wrapper around `fs::rename()` which on failure returns an error indicating the source and
/// destination paths.
///
pub fn rename(from: impl AsRef<Path>, to: impl AsRef<Path>) -> Result<()> {
let from = from.as_ref();
let to = to.as_ref();
fs::rename(from, to)
.map_err(context!("error renaming {:?} to {:?}", from, to))
}
/// Create a symlink at path `dst` which points to `src`
///
/// A wrapper around `fs::symlink()` which on failure returns an error indicating the source and
/// destination paths.
///
pub fn symlink(src: impl AsRef<Path>, dst: impl AsRef<Path>) -> Result<()> {
let src = src.as_ref();
let dst = dst.as_ref();
unixfs::symlink(src, dst)
.map_err(context!("failed to create symlink {:?} to {:?}", dst, src))
}
/// Read directory `dir` and call closure `f` on each `DirEntry`
pub fn read_directory<F>(dir: impl AsRef<Path>, mut f: F) -> Result<()>
where
F: FnMut(&DirEntry) -> Result<()>
{
let dir = dir.as_ref();
let entries = fs::read_dir(dir)
.map_err(context!("failed to read directory {:?}", dir))?;
for dent in entries {
let dent = dent.map_err(context!("error reading entry from directory {:?}", dir))?;
f(&dent)?;
}
Ok(())
}
/// Remove file at `path` if it exists.
///
/// A wrapper around `fs::remove_file()` which on failure returns an error indicating the path of
/// the file which failed to be removed.
///
pub fn remove_file(path: impl AsRef<Path>) -> Result<()> {
let path = path.as_ref();
if path.exists() {
fs::remove_file(path)
.map_err(context!("failed to remove file {:?}", path))?;
}
Ok(())
}
/// Create directory `path` if it does not already exist.
///
/// A wrapper around `fs::create_dir_all()` which on failure returns an error indicating the path
/// of the directory which failed to be created.
///
pub fn create_dir(path: impl AsRef<Path>) -> Result<()> {
let path = path.as_ref();
if !path.exists() {
fs::create_dir_all(path)
.map_err(context!("failed to create directory {:?}", path))?;
}
Ok(())
}
/// Write `contents` to file `path`
///
/// A wrapper around `fs::write()` which on failure returns an error indicating the path
/// of the file which failed to be written.
///
pub fn write_file(path: impl AsRef<Path>, contents: impl AsRef<[u8]>) -> Result<()> {
let path = path.as_ref();
fs::write(path, contents)
.map_err(context!("failed to write to file {:?}", path))
}
/// Read content of file `path` into a `String`
///
/// A wrapper around `fs::read_to_string()` which on failure returns an error indicating the path
/// of the file which failed to be read.
///
pub fn read_to_string(path: impl AsRef<Path>) -> Result<String> {
let path = path.as_ref();
fs::read_to_string(path)
.map_err(context!("failed to read file {:?}", path))
}
/// Copy file at path `from` to a new file at path `to`
///
/// A wrapper around `fs::copy()` which on failure returns an error indicating the source and
/// destination paths.
///
pub fn copy_file(from: impl AsRef<Path>, to: impl AsRef<Path>) -> Result<()> {
let from = from.as_ref();
let to = to.as_ref();
fs::copy(from, to)
.map_err(context!("failed to copy file {:?} to {:?}", from, to))?;
Ok(())
}
fn copy_path(from: &Path, to: &Path, chown_to: Option<(u32,u32)>) -> Result<()> | {
if to.exists() {
bail!("destination path {} already exists which is not expected", to.display());
}
let meta = from.metadata()
.map_err(context!("failed to read metadata from source file {:?}", from))?;
if from.is_dir() {
util::create_dir(to)?;
} else {
util::copy_file(&from, &to)?;
}
if let Some((uid,gid)) = chown_to {
chown(to, uid, gid)?;
} else {
chown(to, meta.uid(), meta.gid())?;
}
Ok(()) | identifier_body |
|
util.rs | is_ascii(c) && (c.is_alphanumeric() || c == '-')
}
fn is_ascii(c: char) -> bool {
c as u32 <= 0x7F
}
pub fn is_first_char_alphabetic(s: &str) -> bool {
if let Some(c) = s.chars().next() {
return is_ascii(c) && c.is_alphabetic()
}
false
}
fn search_path(filename: &str) -> Result<PathBuf> {
let path_var = env::var("PATH").unwrap_or("".into());
for mut path in env::split_paths(&path_var) {
path.push(filename);
if path.exists() {
return Ok(path);
}
}
bail!("could not find {} in $PATH", filename)
}
pub fn ensure_command_exists(cmd: &str) -> Result<()> {
let path = Path::new(cmd);
if !path.is_absolute() {
search_path(cmd)?;
return Ok(())
} else if path.exists() {
return Ok(())
}
bail!("cannot execute '{}': command does not exist", cmd)
}
pub fn sha256<P: AsRef<Path>>(path: P) -> Result<String> {
let path = path.as_ref();
let output = cmd_with_output!("/usr/bin/sha256sum", "{}", path.display())
.map_err(context!("failed to calculate sha256 on {:?}", path))?;
let v: Vec<&str> = output.split_whitespace().collect();
Ok(v[0].trim().to_owned())
}
#[derive(Copy,Clone)]
pub enum FileRange {
All,
Offset(usize),
Range{offset: usize, len: usize},
}
fn ranged_reader<P: AsRef<Path>>(path: P, range: FileRange) -> Result<Box<dyn Read>> {
let path = path.as_ref();
let mut f = File::open(path)
.map_err(context!("error opening input file {:?}", path))?;
let offset = match range {
FileRange::All => 0,
FileRange::Offset(n) => n,
FileRange::Range {offset, .. } => offset,
};
if offset > 0 {
f.seek(SeekFrom::Start(offset as u64))
.map_err(context!("error seeking to offset {} in input file {:?}", offset, path))?;
}
let r = BufReader::new(f);
if let FileRange::Range {len, ..} = range {
Ok(Box::new(r.take(len as u64)))
} else {
Ok(Box::new(r))
}
}
///
/// Execute a command, pipe the contents of a file to stdin, return the output as a `String`
///
pub fn exec_cmdline_pipe_input<S,P>(cmd_path: &str, args: S, input: P, range: FileRange) -> Result<String>
where S: AsRef<str>, P: AsRef<Path>
{
let mut r = ranged_reader(input.as_ref(), range)?;
ensure_command_exists(cmd_path)?;
let args: Vec<&str> = args.as_ref().split_whitespace().collect::<Vec<_>>();
let mut child = Command::new(cmd_path)
.args(args)
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::inherit())
.spawn()
.map_err(context!("unable to execute {}", cmd_path))?;
let stdin = child.stdin.as_mut().unwrap();
io::copy(&mut r, stdin)
.map_err(context!("error copying input to stdin"))?;
let output = child.wait_with_output()
.map_err(context!("error waiting for command {} to exit", cmd_path))?;
Ok(String::from_utf8(output.stdout).unwrap().trim().to_owned())
}
pub fn xz_compress<P: AsRef<Path>>(path: P) -> Result<()> {
let path = path.as_ref();
cmd!("/usr/bin/xz", "-T0 {}", path.display())
.map_err(context!("failed to compress {:?}", path))
}
pub fn xz_decompress<P: AsRef<Path>>(path: P) -> Result<()> {
let path = path.as_ref();
cmd!("/usr/bin/xz", "-d {}", path.display())
.map_err(context!("failed to decompress {:?}", path))
}
pub fn mount<P: AsRef<Path>>(source: impl AsRef<str>, target: P, options: Option<&str>) -> Result<()> {
let source = source.as_ref();
let target = target.as_ref();
if let Some(options) = options {
cmd!("/usr/bin/mount", "{} {} {}", options, source, target.display())
} else {
cmd!("/usr/bin/mount", "{} {}", source, target.display())
}.map_err(context!("failed to mount {} to {:?}", source, target))
}
pub fn umount<P: AsRef<Path>>(path: P) -> Result<()> {
let path = path.as_ref();
cmd!("/usr/bin/umount", "{}", path.display())
.map_err(context!("failed to unmount {:?}", path))
}
pub fn chown_user<P: AsRef<Path>>(path: P) -> Result<()> {
chown(path.as_ref(), 1000, 1000)
}
pub fn chown(path: &Path, uid: u32, gid: u32) -> Result<()> {
let cstr = CString::new(path.as_os_str().as_bytes())
.expect("path contains null byte");
unsafe {
if libc::chown(cstr.as_ptr(), uid, gid) == -1 {
let err = io::Error::last_os_error();
bail!("failed to chown({},{}) {:?}: {}", uid, gid, path, err);
}
}
Ok(())
}
pub fn chmod(path: &Path, mode: u32) -> Result<()> {
let meta = path.metadata()
.map_err(context!("Failed to read metadata from path {:?}", path))?;
meta.permissions().set_mode(mode);
Ok(())
}
/// Rename or move file at `from` to file path `to`
///
/// A wrapper around `fs::rename()` which on failure returns an error indicating the source and
/// destination paths.
///
pub fn rename(from: impl AsRef<Path>, to: impl AsRef<Path>) -> Result<()> {
let from = from.as_ref();
let to = to.as_ref();
fs::rename(from, to)
.map_err(context!("error renaming {:?} to {:?}", from, to))
}
/// Create a symlink at path `dst` which points to `src`
///
/// A wrapper around `fs::symlink()` which on failure returns an error indicating the source and
/// destination paths.
///
pub fn symlink(src: impl AsRef<Path>, dst: impl AsRef<Path>) -> Result<()> {
let src = src.as_ref();
let dst = dst.as_ref();
unixfs::symlink(src, dst)
.map_err(context!("failed to create symlink {:?} to {:?}", dst, src))
}
/// Read directory `dir` and call closure `f` on each `DirEntry`
pub fn read_directory<F>(dir: impl AsRef<Path>, mut f: F) -> Result<()>
where
F: FnMut(&DirEntry) -> Result<()>
{
let dir = dir.as_ref();
let entries = fs::read_dir(dir)
.map_err(context!("failed to read directory {:?}", dir))?;
for dent in entries {
let dent = dent.map_err(context!("error reading entry from directory {:?}", dir))?;
f(&dent)?;
}
Ok(())
}
/// Remove file at `path` if it exists.
///
/// A wrapper around `fs::remove_file()` which on failure returns an error indicating the path of
/// the file which failed to be removed.
///
pub fn remove_file(path: impl AsRef<Path>) -> Result<()> {
let path = path.as_ref();
if path.exists() {
fs::remove_file(path)
.map_err(context!("failed to remove file {:?}", path))?;
}
Ok(())
}
/// Create directory `path` if it does not already exist.
///
/// A wrapper around `fs::create_dir_all()` which on failure returns an error indicating the path
/// of the directory which failed to be created.
///
pub fn create_dir(path: impl AsRef<Path>) -> Result<()> {
let path = path.as_ref();
if !path.exists() {
fs::create_dir_all(path)
.map_err(context!("failed to create directory {:?}", path))?;
}
Ok(())
}
/// Write `contents` to file `path`
///
/// A wrapper around `fs::write()` which on failure returns an error indicating the path
/// of the file which failed to be written.
///
pub fn write_file(path: impl AsRef<Path>, contents: impl AsRef<[u8]>) -> Result<()> {
let path = path.as_ref();
fs::write(path, contents)
.map_err(context!("failed to write to file {:?}", path))
}
/// Read content of file `path` into a `String`
///
/// A wrapper around `fs::read_to_string()` which on failure returns an error indicating the path
/// of the file which failed to be read.
///
pub fn read_to_string(path: impl AsRef<Path>) -> Result<String> {
let path = path.as_ref();
fs::read_to_string(path)
.map_err(context!("failed to read file {:?}", path))
}
/// Copy file at | fn is_alphanum_or_dash(c: char) -> bool { | random_line_split |
|
train_test_function.py | # evaluate on the validation set every valid_interval iterations
max_iter, # maximum number of training iterations, 500k
save_interval, # interval (in iterations) between saved model checkpoints
log_path, # directory where the model checkpoints and logs are saved
num_runner_threads, # number of threads feeding training data into the queue
load_path=None): # path of a previously trained model checkpoint, if any
tf.reset_default_graph()
# runners for the training and validation sets
train_runner = GeneratorRunner(train_gen, train_batch_size * 10)
valid_runner = GeneratorRunner(valid_gen, valid_batch_size * 10)
print('train_runner & valid_runner created successfully!')
is_training = tf.get_variable(name='is_training', dtype=tf.bool, initializer=True, trainable=False)
if train_batch_size == valid_batch_size:
batch_size = train_batch_size
disable_training_op = tf.assign(is_training, False)
enable_training_op = tf.assign(is_training, True)
else:
batch_size = tf.get_variable(name='batch_size', dtype=tf.int32,
collections=[tf.GraphKeys.LOCAL_VARIABLES],
initializer=train_batch_size, trainable=False)
disable_training_op = tf.group(tf.assign(batch_size, valid_batch_size),
tf.assign(is_training, False))
enable_training_op = tf.group(tf.assign(batch_size, train_batch_size),
tf.assign(is_training, True))
# select a runner and dequeue batch_size elements from the queue, shapes (?, 256, 256, 1) and (?,)
img_batch, label_batch = queueSelection([valid_runner, train_runner],
tf.cast(is_training, tf.int32), batch_size)
# build the network model
model = model_class(is_training=is_training, data_format='NCHW')
model.build_model(img_batch)
print('model built successfully!')
# get the model's loss and accuracy
loss, accuracy = model.build_loss(label_batch)
print('loss and accuracy ops created successfully!')
train_loss_s = AverageSummary(loss, name='train_loss', num_iterations=train_interval)
train_accuracy_s = AverageSummary(accuracy, name='train_accuracy', num_iterations=train_interval)
valid_loss_s = AverageSummary(loss, name='valid_loss',
num_iterations=float(valid_ds_size) / float(valid_batch_size))
valid_accuracy_s = AverageSummary(accuracy, name='valid_accuracy',
num_iterations=float(valid_ds_size) / float(valid_batch_size))
# iteration counter; it mainly drives the learning-rate schedule
global_step = tf.get_variable(name='global_step', dtype=tf.int32, shape=[],
initializer=tf.constant_initializer(65000), trainable=False)
learning_rate = tf.train.piecewise_constant(global_step, boundaries, values)
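# For example (illustrative values, not from the original configuration):
# boundaries=[400000] with values=[1e-3, 1e-4] keeps the learning rate at 1e-3
# until iteration 400k and drops it to 1e-4 afterwards.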
lr_summary = tf.summary.scalar('learning_rate', learning_rate)
optimizer = optimizer(learning_rate)
# define the training, validation and initialization ops
minimize_op = optimizer.minimize(loss, global_step)
# training op
train_op = tf.group(minimize_op, train_loss_s.increment_op, train_accuracy_s.increment_op)
# validation op
increment_valid = tf.group(valid_loss_s.increment_op, valid_accuracy_s.increment_op)
# variable initialization op
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
saver = tf.train.Saver(max_to_keep=5000)
print('session start!!!')
train_cnt = 1
valid_cnt = 1
model_save_cnt = 1
# the session starts here
with tf.Session() as sess:
# initialize global and local variables
sess.run(init_op)
# if a previously trained checkpoint exists, load it
if load_path is not None:
print('load_path: ', load_path)
saver.restore(sess, load_path)
# start threads that feed data into the queues
train_runner.start_threads(sess, num_threads=num_runner_threads)
valid_runner.start_threads(sess, num_threads=1)
# set the directory where the graph and summary logs are written
writer = tf.summary.FileWriter(log_path + '/LogFile/', sess.graph)
start = sess.run(global_step)
# reset the summaries, then compute the average accuracy on the validation set before training
sess.run(disable_training_op)
sess.run([valid_loss_s.reset_variable_op,
valid_accuracy_s.reset_variable_op,
train_loss_s.reset_variable_op,
train_accuracy_s.reset_variable_op])
_time = time.time()
for i in range(0, valid_ds_size, valid_batch_size):
sess.run([increment_valid])
_acc_val = sess.run(valid_accuracy_s.mean_variable)
print('initial accuracy in validation set: ', _acc_val)
print('evaluation time on validation set: ', time.time() - _time, ' seconds')
# record the mean valid_loss and valid_accuracy, and reset the running sums
valid_loss_s.add_summary(sess, writer, start)
valid_accuracy_s.add_summary(sess, writer, start)
# training starts here
sess.run(enable_training_op)
print('network will be evaluated on the validation set every %d iterations' % valid_interval)
for i in range(start + 1, max_iter + 1):
# print some progress here, otherwise there is no output at all during training and progress cannot be tracked
sess.run(train_op)
if i % train_interval == 0:
print('train cnt: %d || iterations: %d || train accuracy: %f' % (
train_cnt, i, sess.run(train_accuracy_s.mean_variable)))
train_cnt += 1
train_loss_s.add_summary(sess, writer, i)
train_accuracy_s.add_summary(sess, writer, i)
s = sess.run(lr_summary)
writer.add_summary(s, i)
if i % valid_interval == 0:
sess.run(disable_training_op)
for j in range(0, valid_ds_size, valid_batch_size):
sess.run([increment_valid])
print('validation cnt: %d || iterations: %d || validation accuracy: %f' % (
valid_cnt, i, sess.run(valid_accuracy_s.mean_variable)))
valid_cnt += 1
valid_loss_s.add_summary(sess, writer, i)
valid_accuracy_s.add_summary(sess, writer, i)
sess.run(enable_training_op)
if i % save_interval == 0:
print('save cnt: %d || iterations: %d || saved model %d.ckpt' % (model_save_cnt, i, i))
model_save_cnt += 1
saver.save(sess, log_path + '/Model_' + str(i) + '.ckpt')
def test(model_class, # model class
gen, # generator
load_path, # path to the trained model
batch_size, # batch size used at test time
ds_size): # dataset size
# Evaluate on the test set and report accuracy and loss
tf.reset_default_graph()
runner = GeneratorRunner(gen, batch_size * 10)
img_batch, label_batch = runner.get_batch_inputs(batch_size)
model = model_class(is_training=False, data_format='NCHW')
model.build_model(img_batch)
loss, accuracy = model.build_loss(label_batch)
loss_summary = AverageSummary(loss, name='loss', num_iterations=float(ds_size) / float(batch_size))
accur | summary = AverageSummary(accuracy, name='accuracy', num_iterations=float(ds_size) / float(batch_size))
increment_op = tf.group(loss_summary.increment_op, accuracy_summary.increment_op)
global_step = tf.get_variable(name='global_step', shape=[], dtype=tf.int32,
initializer=tf.constant_initializer(0), trainable=False)
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
saver = tf.train.Saver(max_to_keep=10000)
with tf.Session() as sess:
sess.run(init_op) # initialize variables
saver.restore(sess, load_path) # load the trained model
runner.start_threads(sess, num_threads=1) # start the feeding thread
for i in range(0, ds_size, batch_size):
sess.run([increment_op])
mean_loss, mean_accuracy = sess.run([loss_summary.mean_variable, accuracy_summary.mean_variable])
print('Accuracy: ', mean_accuracy, ' | Loss: ', mean_loss)
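# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# The generator name and checkpoint path below are assumptions, purely to show how
# test() is meant to be wired up: it restores a trained checkpoint and streams
# ds_size examples through the queue in batches of batch_size.
# test(model_class=SRNet,
#      gen=some_test_generator,
#      load_path='/path/to/Model_500000.ckpt',
#      batch_size=40,
#      ds_size=5000)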
def get_confusion_matrix(gen, # generator
weight_path, # directory containing the trained checkpoints
data_size, # dataset size
batch_size=1): # batch size used at test time
tf.reset_default_graph()
assert weight_path is not None, 'weight_path is None, please change weight_path'
# The four outcomes of binary classification
TTCounter = 0
TFCounter = 0
FTCounter = 0
FFCounter = 0
# Counts of stego and cover images
TCounter = 0
FCounter = 0
step_cnt = 0
model = SRNet(is_training=False, data_format='NCHW')
print('SRNet model built successfully')
runner = GeneratorRunner(gen, batch_size * 10)
img_batch, label_batch = runner.get_batch_inputs(batch_size)
model_output = model.build_model(img_batch)
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer | acy_ | identifier_name |
train_test_function.py | ', num_iterations=train_interval)
train_accuracy_s = AverageSummary(accuracy, name='train_accuracy', num_iterations=train_interval)
valid_loss_s = AverageSummary(loss, name='valid_loss',
num_iterations=float(valid_ds_size) / float(valid_batch_size))
valid_accuracy_s = AverageSummary(accuracy, name='valid_accuracy',
num_iterations=float(valid_ds_size) / float(valid_batch_size))
# Iterations are counted from 0, mainly to drive the learning-rate schedule
global_step = tf.get_variable(name='global_step', dtype=tf.int32, shape=[],
initializer=tf.constant_initializer(65000), trainable=False)
learning_rate = tf.train.piecewise_constant(global_step, boundaries, values)
lr_summary = tf.summary.scalar('learning_rate', learning_rate)
optimizer = optimizer(learning_rate)
# Define the ops
minimize_op = optimizer.minimize(loss, global_step)
# Training op
train_op = tf.group(minimize_op, train_loss_s.increment_op, train_accuracy_s.increment_op)
# Validation op
increment_valid = tf.group(valid_loss_s.increment_op, valid_accuracy_s.increment_op)
# Initialization op
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
saver = tf.train.Saver(max_to_keep=5000)
print('session start!!!')
train_cnt = 1
valid_cnt = 1
model_save_cnt = 1
# The session starts here
with tf.Session() as sess:
# Initialize global and local variables
sess.run(init_op)
# If a previously trained checkpoint exists, load it
if load_path is not None:
print('load_path: ', load_path)
saver.restore(sess, load_path)
# Start the threads that feed data into the queues
train_runner.start_threads(sess, num_threads=num_runner_threads)
valid_runner.start_threads(sess, num_threads=1)
# Set the directory where the graph and summaries are written
writer = tf.summary.FileWriter(log_path + '/LogFile/', sess.graph)
start = sess.run(global_step)
# Reset first, then compute the average validation-set accuracy before training
sess.run(disable_training_op)
sess.run([valid_loss_s.reset_variable_op,
valid_accuracy_s.reset_variable_op,
train_loss_s.reset_variable_op,
train_accuracy_s.reset_variable_op])
_time = time.time()
for i in range(0, valid_ds_size, valid_batch_size):
sess.run([increment_valid])
_acc_val = sess.run(valid_accuracy_s.mean_variable)
print('initial accuracy in validation set: ', _acc_val)
print('evaluation time on validation set: ', time.time() - _time, ' seconds')
# Record the mean valid_loss and valid_accuracy, and reset the sum variables
valid_loss_s.add_summary(sess, writer, start)
valid_accuracy_s.add_summary(sess, writer, start)
# Start training
sess.run(enable_training_op)
print('network will be evaluated on the validation set every %d iterations' % valid_interval)
for i in range(start + 1, max_iter + 1):
# Print some progress here; otherwise the training loop produces no output at all
sess.run(train_op)
if i % train_interval == 0:
print('train cnt: %d || iterations: %d || train accuracy: %f' % (
train_cnt, i, sess.run(train_accuracy_s.mean_variable)))
train_cnt += 1
train_loss_s.add_summary(sess, writer, i)
train_accuracy_s.add_summary(sess, writer, i)
s = sess.run(lr_summary)
writer.add_summary(s, i)
if i % valid_interval == 0:
sess.run(disable_training_op)
for j in range(0, valid_ds_size, valid_batch_size):
sess.run([increment_valid])
print('validation cnt: %d || iterations: %d || validation accuracy: %f' % (
valid_cnt, i, sess.run(valid_accuracy_s.mean_variable)))
valid_cnt += 1
valid_loss_s.add_summary(sess, writer, i)
valid_accuracy_s.add_summary(sess, writer, i)
sess.run(enable_training_op)
if i % save_interval == 0:
print('save cnt: %d || iterations: %d || saved model %d.ckpt' % (model_save_cnt, i, i))
model_save_cnt += 1
saver.save(sess, log_path + '/Model_' + str(i) + '.ckpt')
def test(model_class, # model class
gen, # generator
load_path, # path to the trained model
batch_size, # batch size used at test time
ds_size): # dataset size
# Evaluate on the test set and report accuracy and loss
tf.reset_default_graph()
runner = GeneratorRunner(gen, batch_size * 10)
img_batch, label_batch = runner.get_batch_inputs(batch_size)
model = model_class(is_training=False, data_format='NCHW')
model.build_model(img_batch)
loss, accuracy = model.build_loss(label_batch)
loss_summary = AverageSummary(loss, name='loss', num_iterations=float(ds_size) / float(batch_size))
accuracy_summary = AverageSummary(accuracy, name='accuracy', num_iterations=float(ds_size) / float(batch_size))
increment_op = tf.group(loss_summary.increment_op, accuracy_summary.increment_op)
global_step = tf.get_variable(name='global_step', shape=[], dtype=tf.int32,
initializer=tf.constant_initializer(0), trainable=False)
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
saver = tf.train.Saver(max_to_keep=10000)
with tf.Session() as sess:
sess.run(init_op) # initialize variables
saver.restore(sess, load_path) # load the trained model
runner.start_threads(sess, num_threads=1) # start the feeding thread
for i in range(0, ds_size, batch_size):
sess.run([increment_op])
mean_loss, mean_accuracy = sess.run([loss_summary.mean_variable, accuracy_summary.mean_variable])
print('Accuracy: ', mean_accuracy, ' | Loss: ', mean_loss)
def get_confusion_matrix(gen, # generator
weight_path, # directory containing the trained checkpoints
data_size, # dataset size
batch_size=1): # batch size used at test time
tf.reset_default_graph()
assert weight_path is not None, 'weight_path is None, please change weight_path'
# The four outcomes of binary classification
TTCounter = 0
TFCounter = 0
FTCounter = 0
FFCounter = 0
# Counts of stego and cover images
TCounter = 0
FCounter = 0
step_cnt = 0
model = SRNet(is_training=False, data_format='NCHW')
print('SRNet model built successfully')
runner = GeneratorRunner(gen, batch_size * 10)
img_batch, label_batch = runner.get_batch_inputs(batch_size)
model_output = model.build_model(img_batch)
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
saver = tf.train.Saver(max_to_keep=10000)
with tf.Session() as sess:
sess.run(init_op)
model_file = tf.train.latest_checkpoint(weight_path)
saver.restore(sess, model_file)
runner.start_threads(sess, num_threads=1)
for step in range(0, data_size, batch_size):
# Images come out of the queue in the order cover, stego, cover, stego, ...
step_cnt += 1
model_label = sess.run(tf.argmax(model_output, 1))[0]
if step_cnt % 2 == 1:
# cover image
FCounter += 1
if model_label == 0:
FFCounter += 1
else:
FTCounter += 1
else:
# stego image
TCounter += 1
if model_label == 0:
TFCounter += 1
else:
TTCounter += 1
if step_cnt % 50 == 0:
print('cnt: %d || TT: %d/%d, FF: %d/%d, TF: %d/%d, FT: %d/%d || PosCount: %d, NegCount: %d, correct: '
'%.4f' % (step_cnt,
TTCounter, TCounter,
FFCounter, FCounter,
| TFCounter, TCounter,
FTCounter, FCounter,
TCounter, FCounter,
(TTCounter + FFCounter) * 1.0 / step_cnt))
print('\nTOTAL RESULT: ')
print('TT: %d/%d, FF: %d/%d, TF: %d/%d, FT: %d/%d || PosCount: %d, NegCount: %d, correct: %.4f' %
(TTCounter, TCounter,
FFCounter, FCounter,
TFCounter, TCounter,
FTCounter, FCounter,
TCounter, FCounter,
(TTCounter + FFCounter) * 1.0 / step_cnt))
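# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# get_confusion_matrix() assumes the generator yields images in alternating
# cover/stego order; the generator name and paths below are illustrative only.
# get_confusion_matrix(gen=some_paired_test_generator,
#                      weight_path='/path/to/checkpoint_dir/',
#                      data_size=10000,
#                      batch_size=1)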
| conditional_block |
|
train_test_function.py | # evaluate on the validation set every valid_interval iterations
max_iter, # maximum number of iterations (500k)
save_interval, # how often (in iterations) to save the model
log_path, # directory where models and logs are saved
num_runner_threads, # number of threads feeding the training queue
load_path=None): # path to a previously trained model
tf.reset_default_graph()
# Runners for the training and validation sets
train_runner = GeneratorRunner(train_gen, train_batch_size * 10)
valid_runner = GeneratorRunner(valid_gen, valid_batch_size * 10)
print('train_runner & valid_runner down successfully!')
is_training = tf.get_variable(name='is_training', dtype=tf.bool, initializer=True, trainable=False)
if train_batch_size == valid_batch_size:
batch_size = train_batch_size
disable_training_op = tf.assign(is_training, False)
enable_training_op = tf.assign(is_training, True)
else:
batch_size = tf.get_variable(name='batch_size', dtype=tf.int32,
collections=[tf.GraphKeys.LOCAL_VARIABLES],
initializer=train_batch_size, trainable=False)
disable_training_op = tf.group(tf.assign(batch_size, valid_batch_size),
tf.assign(is_training, False))
enable_training_op = tf.group(tf.assign(batch_size, train_batch_size),
tf.assign(is_training, True))
# Pick batch_size elements from the selected runner's queue, shapes (?, 256, 256, 1), (?,)
img_batch, label_batch = queueSelection([valid_runner, train_runner],
tf.cast(is_training, tf.int32), batch_size)
# Build the network model
model = model_class(is_training=is_training, data_format='NCHW')
model.build_model(img_batch)
print('build model successfully!')
# Get the model's loss and accuracy
loss, accuracy = model.build_loss(label_batch)
print('get loss and accuracy successfully!')
train_loss_s = AverageSummary(loss, name='train_loss', num_iterations=train_interval)
train_accuracy_s = AverageSummary(accuracy, name='train_accuracy', num_iterations=train_interval)
valid_loss_s = AverageSummary(loss, name='valid_loss',
num_iterations=float(valid_ds_size) / float(valid_batch_size))
valid_accuracy_s = AverageSummary(accuracy, name='valid_accuracy',
num_iterations=float(valid_ds_size) / float(valid_batch_size))
# Iterations are counted from 0, mainly to drive the learning-rate schedule
global_step = tf.get_variable(name='global_step', dtype=tf.int32, shape=[],
initializer=tf.constant_initializer(65000), trainable=False)
learning_rate = tf.train.piecewise_constant(global_step, boundaries, values)
lr_summary = tf.summary.scalar('learning_rate', learning_rate)
optimizer = optimizer(learning_rate)
# Define the ops
minimize_op = optimizer.minimize(loss, global_step)
# Training op
train_op = tf.group(minimize_op, train_loss_s.increment_op, train_accuracy_s.increment_op)
# Validation op
increment_valid = tf.group(valid_loss_s.increment_op, valid_accuracy_s.increment_op)
# Initialization op
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
saver = tf.train.Saver(max_to_keep=5000)
print('session start!!!')
train_cnt = 1
valid_cnt = 1
model_save_cnt = 1
# The session starts here
with tf.Session() as sess:
# Initialize global and local variables
sess.run(init_op)
# If a previously trained checkpoint exists, load it
if load_path is not None:
print('load_path: ', load_path)
saver.restore(sess, load_path)
# Start the threads that feed data into the queues
train_runner.start_threads(sess, num_threads=num_runner_threads)
valid_runner.start_threads(sess, num_threads=1)
# Set the directory where the graph and summaries are written
writer = tf.summary.FileWriter(log_path + '/LogFile/', sess.graph)
start = sess.run(global_step)
# Reset first, then compute the average validation-set accuracy before training
sess.run(disable_training_op)
sess.run([valid_loss_s.reset_variable_op,
valid_accuracy_s.reset_variable_op,
train_loss_s.reset_variable_op,
train_accuracy_s.reset_variable_op])
_time = time.time()
for i in range(0, valid_ds_size, valid_batch_size):
sess.run([increment_valid])
_acc_val = sess.run(valid_accuracy_s.mean_variable)
print('initial accuracy in validation set: ', _acc_val)
print('evaluation time on validation set: ', time.time() - _time, ' seconds')
# Record the mean valid_loss and valid_accuracy, and reset the sum variables
valid_loss_s.add_summary(sess, writer, start)
valid_accuracy_s.add_summary(sess, writer, start)
# Start training
sess.run(enable_training_op)
print('network will be evaluated on the validation set every %d iterations' % valid_interval)
for i in range(start + 1, max_iter + 1):
# Print some progress here; otherwise the training loop produces no output at all
sess.run(train_op)
if i % train_interval == 0:
print('train cnt: %d || iterations: %d || train accuracy: %f' % (
train_cnt, i, sess.run(train_accuracy_s.mean_variable)))
train_cnt += 1
train_loss_s.add_summary(sess, writer, i)
train_accuracy_s.add_summary(sess, writer, i)
s = sess.run(lr_summary)
writer.add_summary(s, i)
|
for j in range(0, valid_ds_size, valid_batch_size):
sess.run([increment_valid])
print('validation cnt: %d || iterations: %d || validation accuracy: %f' % (
valid_cnt, i, sess.run(valid_accuracy_s.mean_variable)))
valid_cnt += 1
valid_loss_s.add_summary(sess, writer, i)
valid_accuracy_s.add_summary(sess, writer, i)
sess.run(enable_training_op)
if i % save_interval == 0:
print('save cnt: %d || iterations: %d || saved model %d.ckpt' % (model_save_cnt, i, i))
model_save_cnt += 1
saver.save(sess, log_path + '/Model_' + str(i) + '.ckpt')
def test(model_class, # model class
gen, # generator
load_path, # path to the trained model
batch_size, # batch size used at test time
ds_size): # dataset size
# Evaluate on the test set and report accuracy and loss
tf.reset_default_graph()
runner = GeneratorRunner(gen, batch_size * 10)
img_batch, label_batch = runner.get_batch_inputs(batch_size)
model = model_class(is_training=False, data_format='NCHW')
model.build_model(img_batch)
loss, accuracy = model.build_loss(label_batch)
loss_summary = AverageSummary(loss, name='loss', num_iterations=float(ds_size) / float(batch_size))
accuracy_summary = AverageSummary(accuracy, name='accuracy', num_iterations=float(ds_size) / float(batch_size))
increment_op = tf.group(loss_summary.increment_op, accuracy_summary.increment_op)
global_step = tf.get_variable(name='global_step', shape=[], dtype=tf.int32,
initializer=tf.constant_initializer(0), trainable=False)
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
saver = tf.train.Saver(max_to_keep=10000)
with tf.Session() as sess:
sess.run(init_op) # initialize variables
saver.restore(sess, load_path) # load the trained model
runner.start_threads(sess, num_threads=1) # start the feeding thread
for i in range(0, ds_size, batch_size):
sess.run([increment_op])
mean_loss, mean_accuracy = sess.run([loss_summary.mean_variable, accuracy_summary.mean_variable])
print('Accuracy: ', mean_accuracy, ' | Loss: ', mean_loss)
def get_confusion_matrix(gen, # generator
weight_path, # directory containing the trained checkpoints
data_size, # dataset size
batch_size=1): # batch size used at test time
tf.reset_default_graph()
assert weight_path is not None, 'weight_path is None, please change weight_path'
# The four outcomes of binary classification
TTCounter = 0
TFCounter = 0
FTCounter = 0
FFCounter = 0
# Counts of stego and cover images
TCounter = 0
FCounter = 0
step_cnt = 0
model = SRNet(is_training=False, data_format='NCHW')
print('SRNet model built successfully')
runner = GeneratorRunner(gen, batch_size * 10)
img_batch, label_batch = runner.get_batch_inputs(batch_size)
model_output = model.build_model(img_batch)
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
| if i % valid_interval == 0:
sess.run(disable_training_op) | random_line_split |
train_test_function.py | _op = tf.assign(is_training, False)
enable_training_op = tf.assign(is_training, True)
else:
batch_size = tf.get_variable(name='batch_size', dtype=tf.int32,
collections=[tf.GraphKeys.LOCAL_VARIABLES],
initializer=train_batch_size, trainable=False)
disable_training_op = tf.group(tf.assign(batch_size, valid_batch_size),
tf.assign(is_training, False))
enable_training_op = tf.group(tf.assign(batch_size, train_batch_size),
tf.assign(is_training, True))
# Pick batch_size elements from the selected runner's queue, shapes (?, 256, 256, 1), (?,)
img_batch, label_batch = queueSelection([valid_runner, train_runner],
tf.cast(is_training, tf.int32), batch_size)
# Build the network model
model = model_class(is_training=is_training, data_format='NCHW')
model.build_model(img_batch)
print('build model successfully!')
# Get the model's loss and accuracy
loss, accuracy = model.build_loss(label_batch)
print('get loss and accuracy successfully!')
train_loss_s = AverageSummary(loss, name='train_loss', num_iterations=train_interval)
train_accuracy_s = AverageSummary(accuracy, name='train_accuracy', num_iterations=train_interval)
valid_loss_s = AverageSummary(loss, name='valid_loss',
num_iterations=float(valid_ds_size) / float(valid_batch_size))
valid_accuracy_s = AverageSummary(accuracy, name='valid_accuracy',
num_iterations=float(valid_ds_size) / float(valid_batch_size))
# Iterations are counted from 0, mainly to drive the learning-rate schedule
global_step = tf.get_variable(name='global_step', dtype=tf.int32, shape=[],
initializer=tf.constant_initializer(65000), trainable=False)
learning_rate = tf.train.piecewise_constant(global_step, boundaries, values)
lr_summary = tf.summary.scalar('learning_rate', learning_rate)
optimizer = optimizer(learning_rate)
# Define the ops
minimize_op = optimizer.minimize(loss, global_step)
# Training op
train_op = tf.group(minimize_op, train_loss_s.increment_op, train_accuracy_s.increment_op)
# Validation op
increment_valid = tf.group(valid_loss_s.increment_op, valid_accuracy_s.increment_op)
# Initialization op
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
saver = tf.train.Saver(max_to_keep=5000)
print('session start!!!')
train_cnt = 1
valid_cnt = 1
model_save_cnt = 1
# The session starts here
with tf.Session() as sess:
# Initialize global and local variables
sess.run(init_op)
# If a previously trained checkpoint exists, load it
if load_path is not None:
print('load_path: ', load_path)
saver.restore(sess, load_path)
# Start the threads that feed data into the queues
train_runner.start_threads(sess, num_threads=num_runner_threads)
valid_runner.start_threads(sess, num_threads=1)
# Set the directory where the graph and summaries are written
writer = tf.summary.FileWriter(log_path + '/LogFile/', sess.graph)
start = sess.run(global_step)
# Reset first, then compute the average validation-set accuracy before training
sess.run(disable_training_op)
sess.run([valid_loss_s.reset_variable_op,
valid_accuracy_s.reset_variable_op,
train_loss_s.reset_variable_op,
train_accuracy_s.reset_variable_op])
_time = time.time()
for i in range(0, valid_ds_size, valid_batch_size):
sess.run([increment_valid])
_acc_val = sess.run(valid_accuracy_s.mean_variable)
print('initial accuracy in validation set: ', _acc_val)
print('evaluation time on validation set: ', time.time() - _time, ' seconds')
# Record the mean valid_loss and valid_accuracy, and reset the sum variables
valid_loss_s.add_summary(sess, writer, start)
valid_accuracy_s.add_summary(sess, writer, start)
# Start training
sess.run(enable_training_op)
print('network will be evaluated on the validation set every %d iterations' % valid_interval)
for i in range(start + 1, max_iter + 1):
# Print some progress here; otherwise the training loop produces no output at all
sess.run(train_op)
if i % train_interval == 0:
print('train cnt: %d || iterations: %d || train accuracy: %f' % (
train_cnt, i, sess.run(train_accuracy_s.mean_variable)))
train_cnt += 1
train_loss_s.add_summary(sess, writer, i)
train_accuracy_s.add_summary(sess, writer, i)
s = sess.run(lr_summary)
writer.add_summary(s, i)
if i % valid_interval == 0:
sess.run(disable_training_op)
for j in range(0, valid_ds_size, valid_batch_size):
sess.run([increment_valid])
print('validation cnt: %d || iterations: %d || validation accuracy: %f' % (
valid_cnt, i, sess.run(valid_accuracy_s.mean_variable)))
valid_cnt += 1
valid_loss_s.add_summary(sess, writer, i)
valid_accuracy_s.add_summary(sess, writer, i)
sess.run(enable_training_op)
if i % save_interval == 0:
print('save cnt: %d || iterations: %d || saved model %d.ckpt' % (model_save_cnt, i, i))
model_save_cnt += 1
saver.save(sess, log_path + '/Model_' + str(i) + '.ckpt')
def test(model_class, # model class
gen, # generator
load_path, # path to the trained model
batch_size, # batch size used at test time
ds_size): # dataset size
# Evaluate on the test set and report accuracy and loss
tf.reset_default_graph()
runner = GeneratorRunner(gen, batch_size * 10)
img_batch, label_batch = runner.get_batch_inputs(batch_size)
model = model_class(is_training=False, data_format='NCHW')
model.build_model(img_batch)
loss, accuracy = model.build_loss(label_batch)
loss_summary = AverageSummary(loss, name='loss', num_iterations=float(ds_size) / float(batch_size))
accuracy_summary = AverageSummary(accuracy, name='accuracy', num_iterations=float(ds_size) / float(batch_size))
increment_op = tf.group(loss_summary.increment_op, accuracy_summary.increment_op)
global_step = tf.get_variable(name='global_step', shape=[], dtype=tf.int32,
initializer=tf.constant_initializer(0), trainable=False)
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
saver = tf.train.Saver(max_to_keep=10000)
with tf.Session() as sess:
sess.run(init_op) # initialize variables
saver.restore(sess, load_path) # load the trained model
runner.start_threads(sess, num_threads=1) # start the feeding thread
for i in range(0, ds_size, batch_size):
sess.run([increment_op])
mean_loss, mean_accuracy = sess.run([loss_summary.mean_variable, accuracy_summary.mean_variable])
print('Accuracy: ', mean_accuracy, ' | Loss: ', mean_loss)
def get_confusion_matrix(gen, # generator
weight_path, # directory containing the trained checkpoints
data_size, # dataset size
batch_size=1): # batch size used at test time
tf.reset_default_graph()
assert weight_path is not None, 'weight_path is None, please change weight_path'
# The four outcomes of binary classification
TTCounter = 0
TFCounter = 0
FTCounter = 0
FFCounter = 0
# Counts of stego and cover images
TCounter = 0
FCounter = 0
step_cnt = 0
model = SRNet(is_training=False, data_format='NCHW')
print('SRNet model built successfully')
runner = GeneratorRunner(gen, batch_size * 10)
img_batch, label_batch = runner.get_batch_inputs(batch_size)
model_output = model.build_model(img_batch)
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
saver = tf.train.Saver(max_to_keep=10000)
with tf.Session() as sess:
sess.run(init_op)
model_file | = tf.train.latest_checkpoint(weight_path)
saver.restore(sess, model_file)
runner.start_threads(sess, num_threads=1)
for step in range(0, data_size, batch_size):
# Images come out of the queue in the order cover, stego, cover, stego, ...
step_cnt += 1
model_label = sess.run(tf.argmax(model_output, 1))[0]
if step_cnt % 2 == 1:
# cover image
FCounter += 1
if model_label == 0:
FFCounter += 1
else:
FTCounter += 1
else:
# stego image
TCounter += 1
if model_label == 0: | identifier_body |
|
lib.rs | let d = apply_limit(self.d_limit, d_unbounded);
// Calculate the final output by adding together the PID terms, then
// apply the final defined output limit
let output = p + self.integral_term + d;
let output = apply_limit(self.output_limit, output);
// Return the individual term's contributions and the final output
ControlOutput {
p,
i: self.integral_term,
d,
output,
}
}
/// Resets the integral term back to zero, this may drastically change the
/// control output.
pub fn reset_integral_term(&mut self) {
self.integral_term = T::zero();
}
}
/// Saturates the input `value` according to the absolute `limit` (`-abs(limit) <= output <= abs(limit)`).
fn apply_limit<T: Number>(limit: T, value: T) -> T {
num_traits::clamp(value, -limit.abs(), limit.abs())
}
#[cfg(test)]
mod tests {
use super::Pid;
use crate::ControlOutput;
/// Proportional-only controller operation and limits
#[test]
fn proportional() {
let mut pid = Pid::new(10.0, 100.0);
pid.p(2.0, 100.0).i(0.0, 100.0).d(0.0, 100.0);
assert_eq!(pid.setpoint, 10.0);
// Test simple proportional
assert_eq!(pid.next_control_output(0.0).output, 20.0);
// Test proportional limit
pid.p_limit = 10.0;
assert_eq!(pid.next_control_output(0.0).output, 10.0);
}
/// Derivative-only controller operation and limits
#[test]
fn derivative() {
let mut pid = Pid::new(10.0, 100.0);
pid.p(0.0, 100.0).i(0.0, 100.0).d(2.0, 100.0);
// Test that there's no derivative since it's the first measurement
assert_eq!(pid.next_control_output(0.0).output, 0.0);
// Test that there's now a derivative
assert_eq!(pid.next_control_output(5.0).output, -10.0);
// Test derivative limit
pid.d_limit = 5.0;
assert_eq!(pid.next_control_output(10.0).output, -5.0);
}
/// Integral-only controller operation and limits
#[test]
fn integral() {
let mut pid = Pid::new(10.0, 100.0);
pid.p(0.0, 100.0).i(2.0, 100.0).d(0.0, 100.0);
// Test basic integration
assert_eq!(pid.next_control_output(0.0).output, 20.0);
assert_eq!(pid.next_control_output(0.0).output, 40.0);
assert_eq!(pid.next_control_output(5.0).output, 50.0);
// Test limit
pid.i_limit = 50.0;
assert_eq!(pid.next_control_output(5.0).output, 50.0);
// Test that limit doesn't impede reversal of error integral
assert_eq!(pid.next_control_output(15.0).output, 40.0);
// Test that error integral accumulates negative values
let mut pid2 = Pid::new(-10.0, 100.0);
pid2.p(0.0, 100.0).i(2.0, 100.0).d(0.0, 100.0);
assert_eq!(pid2.next_control_output(0.0).output, -20.0);
assert_eq!(pid2.next_control_output(0.0).output, -40.0);
pid2.i_limit = 50.0;
assert_eq!(pid2.next_control_output(-5.0).output, -50.0);
// Test that limit doesn't impede reversal of error integral
assert_eq!(pid2.next_control_output(-15.0).output, -40.0);
}
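/// Added sketch (not part of the original suite): the private `apply_limit`
/// helper clamps symmetrically around zero and ignores the sign of the limit.
#[test]
fn apply_limit_clamps_symmetrically() {
assert_eq!(super::apply_limit(5.0, 7.0), 5.0);
assert_eq!(super::apply_limit(5.0, -7.0), -5.0);
assert_eq!(super::apply_limit(-5.0, 3.0), 3.0);
}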
/// Checks that a full PID controller's limits work properly through multiple output iterations
#[test]
fn output_limit() {
let mut pid = Pid::new(10.0, 1.0);
pid.p(1.0, 100.0).i(0.0, 100.0).d(0.0, 100.0);
let out = pid.next_control_output(0.0);
assert_eq!(out.p, 10.0); // 1.0 * 10.0
assert_eq!(out.output, 1.0);
let out = pid.next_control_output(20.0);
assert_eq!(out.p, -10.0); // 1.0 * (10.0 - 20.0)
assert_eq!(out.output, -1.0);
}
/// Combined PID operation
#[test]
fn pid() {
let mut pid = Pid::new(10.0, 100.0);
pid.p(1.0, 100.0).i(0.1, 100.0).d(1.0, 100.0);
let out = pid.next_control_output(0.0);
assert_eq!(out.p, 10.0); // 1.0 * 10.0
assert_eq!(out.i, 1.0); // 0.1 * 10.0
assert_eq!(out.d, 0.0); // -(1.0 * 0.0)
assert_eq!(out.output, 11.0);
let out = pid.next_control_output(5.0);
assert_eq!(out.p, 5.0); // 1.0 * 5.0
assert_eq!(out.i, 1.5); // 0.1 * (10.0 + 5.0)
assert_eq!(out.d, -5.0); // -(1.0 * 5.0)
assert_eq!(out.output, 1.5);
let out = pid.next_control_output(11.0);
assert_eq!(out.p, -1.0); // 1.0 * -1.0
assert_eq!(out.i, 1.4); // 0.1 * (10.0 + 5.0 - 1)
assert_eq!(out.d, -6.0); // -(1.0 * 6.0)
assert_eq!(out.output, -5.6);
let out = pid.next_control_output(10.0);
assert_eq!(out.p, 0.0); // 1.0 * 0.0
assert_eq!(out.i, 1.4); // 0.1 * (10.0 + 5.0 - 1.0 + 0.0)
assert_eq!(out.d, 1.0); // -(1.0 * -1.0)
assert_eq!(out.output, 2.4);
}
// NOTE: use for new test in future: /// Full PID operation with mixed float checking to make sure they're equal
/// PID operation with zero'd values, checking to see if different floats equal each other
#[test]
fn floats_zeros() {
let mut pid_f32 = Pid::new(10.0f32, 100.0);
pid_f32.p(0.0, 100.0).i(0.0, 100.0).d(0.0, 100.0);
let mut pid_f64 = Pid::new(10.0, 100.0f64);
pid_f64.p(0.0, 100.0).i(0.0, 100.0).d(0.0, 100.0);
for _ in 0..5 {
assert_eq!(
pid_f32.next_control_output(0.0).output,
pid_f64.next_control_output(0.0).output as f32
);
}
}
// NOTE: use for new test in future: /// Full PID operation with mixed signed integer checking to make sure they're equal
/// PID operation with zero'd values, checking to see if different signed integer types equal each other
#[test] | fn signed_integers_zeros() {
let mut pid_i8 = Pid::new(10i8, 100);
pid_i8.p(0, 100).i(0, 100).d(0, 100);
| random_line_split |
|
lib.rs | 0, 100.0).i(4.5, 100.0).d(0.25, 100.0);
///
/// // Get first output
/// let full_output = full_controller.next_control_output(400.0);
/// ```
///
/// This [`next_control_output`](Self::next_control_output) method is what's used to input new values into the controller to tell it what the current state of the system is. In the examples above it's only being used once, but realistically this will be a hot method. Please see [ControlOutput] for examples of how to handle these outputs; it's quite straight forward and mirrors the values of this structure in some ways.
///
/// The last item of note is that these [`p`](Self::p()), [`i`](Self::i()), and [`d`](Self::d()) methods can be used *during* operation which lets you add and/or modify these controller values if need be.
///
/// # Type Warning
///
/// [Number] is abstract and can be used with anything from a [i32] to an [i128] (as well as user-defined types). Because of this, very small types might overflow during calculation in [`next_control_output`](Self::next_control_output). You probably don't want to use [i8] or user-defined types around that size so keep that in mind when designing your controller.
#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd)]
#[cfg_attr(feature = "serde", derive(Deserialize, Serialize))]
pub struct Pid<T: Number> {
/// Ideal setpoint to strive for.
pub setpoint: T,
/// Defines the overall output filter limit.
pub output_limit: T,
/// Proportional gain.
pub kp: T,
/// Integral gain.
pub ki: T,
/// Derivative gain.
pub kd: T,
/// Limiter for the proportional term: `-p_limit <= P <= p_limit`.
pub p_limit: T,
/// Limiter for the integral term: `-i_limit <= I <= i_limit`.
pub i_limit: T,
/// Limiter for the derivative term: `-d_limit <= D <= d_limit`.
pub d_limit: T,
/// Last calculated integral value if [Pid::ki] is used.
integral_term: T,
/// Previously found measurement whilst using the [Pid::next_control_output] method.
prev_measurement: Option<T>,
}
/// Output of [controller iterations](Pid::next_control_output) with weights
///
/// # Example
///
/// This structure is simple to use and features three weights: [p](Self::p), [i](Self::i), and [d](Self::d). These can be used to figure out how much each term from [Pid] contributed to the final [output](Self::output) value which should be taken as the final controller output for this iteration:
///
/// ```rust
/// use pid::{Pid, ControlOutput};
///
/// // Setup controller
/// let mut pid = Pid::new(15.0, 100.0);
/// pid.p(10.0, 100.0).i(1.0, 100.0).d(2.0, 100.0);
///
/// // Input an example value and get a report for an output iteration
/// let output = pid.next_control_output(26.2456);
/// println!("P: {}\nI: {}\nD: {}\nFinal Output: {}", output.p, output.i, output.d, output.output);
/// ```
#[derive(Debug, PartialEq, Eq)]
pub struct ControlOutput<T: Number> {
/// Contribution of the P term to the output.
pub p: T,
/// Contribution of the I term to the output.
///
/// This integral term is equal to `sum[error(t) * ki(t)] (for all t)`
pub i: T,
/// Contribution of the D term to the output.
pub d: T,
/// Output of the PID controller.
pub output: T,
}
impl<T> Pid<T>
where
T: Number,
{
/// Creates a new controller with the target setpoint and the output limit
///
/// To set your P, I, and D terms into this controller, please use the following builder methods:
/// - [Self::p()]: Proportional term setting
/// - [Self::i()]: Integral term setting
/// - [Self::d()]: Derivative term setting
pub fn new(setpoint: impl Into<T>, output_limit: impl Into<T>) -> Self {
Self {
setpoint: setpoint.into(),
output_limit: output_limit.into(),
kp: T::zero(),
ki: T::zero(),
kd: T::zero(),
p_limit: T::zero(),
i_limit: T::zero(),
d_limit: T::zero(),
integral_term: T::zero(),
prev_measurement: None,
}
}
/// Sets the [Self::p] term for this controller.
pub fn p(&mut self, gain: impl Into<T>, limit: impl Into<T>) -> &mut Self {
self.kp = gain.into();
self.p_limit = limit.into();
self
}
/// Sets the [Self::i] term for this controller.
pub fn i(&mut self, gain: impl Into<T>, limit: impl Into<T>) -> &mut Self {
self.ki = gain.into();
self.i_limit = limit.into();
self
}
/// Sets the [Self::d] term for this controller.
pub fn d(&mut self, gain: impl Into<T>, limit: impl Into<T>) -> &mut Self {
self.kd = gain.into();
self.d_limit = limit.into();
self
}
/// Sets the [Pid::setpoint] to target for this controller.
pub fn | (&mut self, setpoint: impl Into<T>) -> &mut Self {
self.setpoint = setpoint.into();
self
}
/// Given a new measurement, calculates the next [control output](ControlOutput).
///
/// # Panics
///
/// - If a setpoint has not been set via [`Self::setpoint()`].
pub fn next_control_output(&mut self, measurement: T) -> ControlOutput<T> {
// Calculate the error between the ideal setpoint and the current
// measurement to compare against
let error = self.setpoint - measurement;
// Calculate the proportional term and limit to it's individual limit
let p_unbounded = error * self.kp;
let p = apply_limit(self.p_limit, p_unbounded);
// Mitigate output jumps when ki(t) != ki(t-1).
// While it's standard to use an error_integral that's a running sum of
// just the error (no ki), because we support ki changing dynamically,
// we store the entire term so that we don't need to remember previous
// ki values.
self.integral_term = self.integral_term + error * self.ki;
// Mitigate integral windup: Don't want to keep building up error
// beyond what i_limit will allow.
self.integral_term = apply_limit(self.i_limit, self.integral_term);
// Mitigate derivative kick: Use the derivative of the measurement
// rather than the derivative of the error.
let d_unbounded = -match self.prev_measurement.as_ref() {
Some(prev_measurement) => measurement - *prev_measurement,
None => T::zero(),
} * self.kd;
self.prev_measurement = Some(measurement);
let d = apply_limit(self.d_limit, d_unbounded);
// Calculate the final output by adding together the PID terms, then
// apply the final defined output limit
let output = p + self.integral_term + d;
let output = apply_limit(self.output_limit, output);
// Return the individual term's contributions and the final output
ControlOutput {
p,
i: self.integral_term,
d,
output,
}
}
/// Resets the integral term back to zero, this may drastically change the
/// control output.
pub fn reset_integral_term(&mut self) {
self.integral_term = T::zero();
}
}
/// Saturates the input `value` according to the absolute `limit` (`-abs(limit) <= output <= abs(limit)`).
fn apply_limit<T: Number>(limit: T, value: T) -> T {
num_traits::clamp(value, -limit.abs(), limit.abs())
}
#[cfg(test)]
mod tests {
use super::Pid;
use crate::ControlOutput;
/// Proportional-only controller operation and limits
#[test]
fn proportional() {
let mut pid = Pid::new(10.0, 100.0);
pid.p(2.0, 100.0).i(0.0, 100.0).d(0.0, 100.0);
assert_eq!(pid.setpoint, 10.0);
// Test simple proportional
assert_eq!(pid.next_control_output(0.0).output, 20.0);
// Test proportional limit
pid.p_limit = 10.0;
assert_eq!(pid.next_control_output(0.0).output, 10.0);
}
/// | setpoint | identifier_name |
cpu.py | -tile', False), default=16)
# CIRE
o['min-storage'] = oo.pop('min-storage', False)
o['cire-rotate'] = oo.pop('cire-rotate', False)
o['cire-maxpar'] = oo.pop('cire-maxpar', False)
o['cire-ftemps'] = oo.pop('cire-ftemps', False)
o['cire-mingain'] = oo.pop('cire-mingain', cls.CIRE_MINGAIN)
o['cire-schedule'] = oo.pop('cire-schedule', cls.CIRE_SCHEDULE)
# Shared-memory parallelism
o['par-collapse-ncores'] = oo.pop('par-collapse-ncores', cls.PAR_COLLAPSE_NCORES)
o['par-collapse-work'] = oo.pop('par-collapse-work', cls.PAR_COLLAPSE_WORK)
o['par-chunk-nonaffine'] = oo.pop('par-chunk-nonaffine', cls.PAR_CHUNK_NONAFFINE)
o['par-dynamic-work'] = oo.pop('par-dynamic-work', cls.PAR_DYNAMIC_WORK)
o['par-nested'] = oo.pop('par-nested', cls.PAR_NESTED)
# Distributed parallelism
o['dist-drop-unwritten'] = oo.pop('dist-drop-unwritten', cls.DIST_DROP_UNWRITTEN)
# Misc
o['expand'] = oo.pop('expand', cls.EXPAND)
o['optcomms'] = oo.pop('optcomms', True)
o['linearize'] = oo.pop('linearize', False)
o['mapify-reduce'] = oo.pop('mapify-reduce', cls.MAPIFY_REDUCE)
o['index-mode'] = oo.pop('index-mode', cls.INDEX_MODE)
o['place-transfers'] = oo.pop('place-transfers', True)
# Recognised but unused by the CPU backend
oo.pop('par-disabled', None)
oo.pop('gpu-fit', None)
oo.pop('gpu-create', None)
if oo:
raise InvalidOperator("Unrecognized optimization options: [%s]"
% ", ".join(list(oo)))
kwargs['options'].update(o)
return kwargs
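# Hedged illustration (not in the original file): these options are typically
# reached through an Operator's `opt` argument; the equation list and the exact
# option values below are assumptions for illustration only, e.g.
# Operator(eqns, opt=('advanced', {'blockinner': True, 'par-tile': True}))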
# Mode level
class Cpu64NoopOperator(Cpu64OperatorMixin, CoreOperator):
@classmethod
@timed_pass(name='specializing.IET')
def _specialize_iet(cls, graph, **kwargs):
options = kwargs['options']
platform = kwargs['platform']
compiler = kwargs['compiler']
sregistry = kwargs['sregistry']
# Distributed-memory parallelism
mpiize(graph, **kwargs)
# Shared-memory parallelism
if options['openmp']:
parizer = cls._Target.Parizer(sregistry, options, platform, compiler)
parizer.make_parallel(graph)
parizer.initialize(graph, options=options)
# Symbol definitions
cls._Target.DataManager(**kwargs).process(graph)
return graph
class Cpu64AdvOperator(Cpu64OperatorMixin, CoreOperator):
@classmethod
@timed_pass(name='specializing.DSL')
def _specialize_dsl(cls, expressions, **kwargs):
expressions = collect_derivatives(expressions)
return expressions
@classmethod
@timed_pass(name='specializing.Clusters')
def _specialize_clusters(cls, clusters, **kwargs):
options = kwargs['options']
platform = kwargs['platform']
sregistry = kwargs['sregistry']
# Toposort+Fusion (the former to expose more fusion opportunities)
clusters = fuse(clusters, toposort=True)
# Hoist and optimize Dimension-invariant sub-expressions
clusters = cire(clusters, 'invariants', sregistry, options, platform)
clusters = Lift().process(clusters)
# Blocking to improve data locality
if options['blockeager']:
clusters = blocking(clusters, sregistry, options)
# Reduce flops
clusters = cire(clusters, 'sops', sregistry, options, platform)
clusters = factorize(clusters)
clusters = optimize_pows(clusters)
# The previous passes may have created fusion opportunities
clusters = fuse(clusters)
# Reduce flops
clusters = cse(clusters, sregistry, options)
# Blocking to improve data locality
if options['blocklazy']:
clusters = blocking(clusters, sregistry, options)
return clusters
@classmethod
@timed_pass(name='specializing.IET')
def _specialize_iet(cls, graph, **kwargs):
options = kwargs['options']
platform = kwargs['platform']
compiler = kwargs['compiler']
sregistry = kwargs['sregistry']
# Flush denormal numbers
avoid_denormals(graph, platform=platform)
# Distributed-memory parallelism
mpiize(graph, **kwargs)
# Lower BlockDimensions so that blocks of arbitrary shape may be used
relax_incr_dimensions(graph, **kwargs)
# Parallelism
parizer = cls._Target.Parizer(sregistry, options, platform, compiler)
parizer.make_simd(graph)
parizer.make_parallel(graph)
parizer.initialize(graph, options=options)
# Misc optimizations
hoist_prodders(graph)
# Symbol definitions
cls._Target.DataManager(**kwargs).process(graph)
# Linearize n-dimensional Indexeds
linearize(graph, **kwargs)
return graph
class Cpu64FsgOperator(Cpu64AdvOperator):
"""
Operator with performance optimizations tailored "For small grids" ("Fsg").
"""
BLOCK_EAGER = False
@classmethod
def _normalize_kwargs(cls, **kwargs):
kwargs = super()._normalize_kwargs(**kwargs)
if kwargs['options']['min-storage']:
raise InvalidOperator('You should not use `min-storage` with `advanced-fsg` '
'as they work in opposite directions')
return kwargs
class Cpu64CustomOperator(Cpu64OperatorMixin, CustomOperator):
_Target = OmpTarget
@classmethod
def _make_dsl_passes_mapper(cls, **kwargs):
return {
'collect-derivs': collect_derivatives,
}
@classmethod
def _make_clusters_passes_mapper(cls, **kwargs):
options = kwargs['options']
platform = kwargs['platform']
sregistry = kwargs['sregistry']
# Callback used by `buffering`; it mimics `is_on_device`, which is used
# on device backends
def callback(f):
if f.is_TimeFunction and f.save is not None:
return f.time_dim
else:
return None
return {
'buffering': lambda i: buffering(i, callback, sregistry, options),
'blocking': lambda i: blocking(i, sregistry, options),
'factorize': factorize,
'fission': fission,
'fuse': lambda i: fuse(i, options=options),
'lift': lambda i: Lift().process(cire(i, 'invariants', sregistry,
options, platform)),
'cire-sops': lambda i: cire(i, 'sops', sregistry, options, platform),
'cse': lambda i: cse(i, sregistry, options),
'opt-pows': optimize_pows,
'opt-hyperplanes': optimize_hyperplanes,
'topofuse': lambda i: fuse(i, toposort=True, options=options)
}
@classmethod
def _make_iet_passes_mapper(cls, **kwargs):
options = kwargs['options']
platform = kwargs['platform']
compiler = kwargs['compiler']
sregistry = kwargs['sregistry']
parizer = cls._Target.Parizer(sregistry, options, platform, compiler)
return {
'denormals': avoid_denormals,
'blocking': partial(relax_incr_dimensions, **kwargs),
'parallel': parizer.make_parallel,
'openmp': parizer.make_parallel,
'mpi': partial(mpiize, **kwargs),
'linearize': partial(linearize, **kwargs),
'simd': partial(parizer.make_simd),
'prodders': hoist_prodders,
'init': partial(parizer.initialize, options=options)
}
_known_passes = (
# DSL
'collect-derivs',
# Expressions
'buffering',
# Clusters
'blocking', 'topofuse', 'fission', 'fuse', 'factorize', 'cire-sops',
'cse', 'lift', 'opt-pows', 'opt-hyperplanes',
# IET
'denormals', 'openmp', 'mpi', 'linearize', 'simd', 'prodders',
)
_known_passes_disabled = ('tasking', 'streaming', 'openacc')
assert not (set(_known_passes) & set(_known_passes_disabled))
# Language level
class Cpu64NoopCOperator(Cpu64NoopOperator):
_Target = CTarget
class Cpu64NoopOmpOperator(Cpu64NoopOperator):
_Target = OmpTarget
class Cpu64AdvCOperator(Cpu64AdvOperator):
_Target = CTarget
class Cpu64AdvOmpOperator(Cpu64AdvOperator):
| _Target = OmpTarget | identifier_body |
|
cpu.py | 'Cpu64AdvOmpOperator', 'Cpu64FsgCOperator', 'Cpu64FsgOmpOperator',
'Cpu64CustomOperator']
class Cpu64OperatorMixin(object):
@classmethod
def _normalize_kwargs(cls, **kwargs):
o = {}
oo = kwargs['options']
# Execution modes
o['openmp'] = oo.pop('openmp')
o['mpi'] = oo.pop('mpi')
o['parallel'] = o['openmp'] # Backwards compatibility
# Buffering
o['buf-async-degree'] = oo.pop('buf-async-degree', None)
# Fusion
o['fuse-tasks'] = oo.pop('fuse-tasks', False)
# CSE
o['cse-min-cost'] = oo.pop('cse-min-cost', cls.CSE_MIN_COST)
# Blocking
o['blockinner'] = oo.pop('blockinner', False)
o['blocklevels'] = oo.pop('blocklevels', cls.BLOCK_LEVELS)
o['blockeager'] = oo.pop('blockeager', cls.BLOCK_EAGER)
o['blocklazy'] = oo.pop('blocklazy', not o['blockeager'])
o['blockrelax'] = oo.pop('blockrelax', cls.BLOCK_RELAX)
o['skewing'] = oo.pop('skewing', False)
o['par-tile'] = ParTile(oo.pop('par-tile', False), default=16)
# CIRE
o['min-storage'] = oo.pop('min-storage', False)
o['cire-rotate'] = oo.pop('cire-rotate', False)
o['cire-maxpar'] = oo.pop('cire-maxpar', False)
o['cire-ftemps'] = oo.pop('cire-ftemps', False)
o['cire-mingain'] = oo.pop('cire-mingain', cls.CIRE_MINGAIN)
o['cire-schedule'] = oo.pop('cire-schedule', cls.CIRE_SCHEDULE)
# Shared-memory parallelism
o['par-collapse-ncores'] = oo.pop('par-collapse-ncores', cls.PAR_COLLAPSE_NCORES)
o['par-collapse-work'] = oo.pop('par-collapse-work', cls.PAR_COLLAPSE_WORK)
o['par-chunk-nonaffine'] = oo.pop('par-chunk-nonaffine', cls.PAR_CHUNK_NONAFFINE)
o['par-dynamic-work'] = oo.pop('par-dynamic-work', cls.PAR_DYNAMIC_WORK)
o['par-nested'] = oo.pop('par-nested', cls.PAR_NESTED)
# Distributed parallelism
o['dist-drop-unwritten'] = oo.pop('dist-drop-unwritten', cls.DIST_DROP_UNWRITTEN)
# Misc
o['expand'] = oo.pop('expand', cls.EXPAND)
o['optcomms'] = oo.pop('optcomms', True)
o['linearize'] = oo.pop('linearize', False)
o['mapify-reduce'] = oo.pop('mapify-reduce', cls.MAPIFY_REDUCE)
o['index-mode'] = oo.pop('index-mode', cls.INDEX_MODE)
o['place-transfers'] = oo.pop('place-transfers', True)
# Recognised but unused by the CPU backend
oo.pop('par-disabled', None)
oo.pop('gpu-fit', None)
oo.pop('gpu-create', None)
if oo:
raise InvalidOperator("Unrecognized optimization options: [%s]"
% ", ".join(list(oo)))
kwargs['options'].update(o)
return kwargs
# Mode level
class Cpu64NoopOperator(Cpu64OperatorMixin, CoreOperator):
@classmethod
@timed_pass(name='specializing.IET')
def _specialize_iet(cls, graph, **kwargs):
options = kwargs['options']
platform = kwargs['platform']
compiler = kwargs['compiler']
sregistry = kwargs['sregistry']
# Distributed-memory parallelism
mpiize(graph, **kwargs)
# Shared-memory parallelism
if options['openmp']:
parizer = cls._Target.Parizer(sregistry, options, platform, compiler)
parizer.make_parallel(graph)
parizer.initialize(graph, options=options)
# Symbol definitions
cls._Target.DataManager(**kwargs).process(graph)
return graph
class Cpu64AdvOperator(Cpu64OperatorMixin, CoreOperator):
@classmethod
@timed_pass(name='specializing.DSL')
def _specialize_dsl(cls, expressions, **kwargs):
expressions = collect_derivatives(expressions)
return expressions
@classmethod
@timed_pass(name='specializing.Clusters')
def _specialize_clusters(cls, clusters, **kwargs):
options = kwargs['options']
platform = kwargs['platform']
sregistry = kwargs['sregistry']
# Toposort+Fusion (the former to expose more fusion opportunities)
clusters = fuse(clusters, toposort=True)
# Hoist and optimize Dimension-invariant sub-expressions
clusters = cire(clusters, 'invariants', sregistry, options, platform)
clusters = Lift().process(clusters)
# Blocking to improve data locality
if options['blockeager']:
clusters = blocking(clusters, sregistry, options)
# Reduce flops
clusters = cire(clusters, 'sops', sregistry, options, platform)
clusters = factorize(clusters)
clusters = optimize_pows(clusters)
# The previous passes may have created fusion opportunities
clusters = fuse(clusters)
# Reduce flops
clusters = cse(clusters, sregistry, options)
# Blocking to improve data locality
if options['blocklazy']:
clusters = blocking(clusters, sregistry, options)
return clusters
@classmethod
@timed_pass(name='specializing.IET')
def _specialize_iet(cls, graph, **kwargs):
options = kwargs['options']
platform = kwargs['platform']
compiler = kwargs['compiler']
sregistry = kwargs['sregistry']
# Flush denormal numbers
avoid_denormals(graph, platform=platform)
# Distributed-memory parallelism
mpiize(graph, **kwargs)
# Lower BlockDimensions so that blocks of arbitrary shape may be used
relax_incr_dimensions(graph, **kwargs)
# Parallelism
parizer = cls._Target.Parizer(sregistry, options, platform, compiler)
parizer.make_simd(graph)
parizer.make_parallel(graph)
parizer.initialize(graph, options=options)
# Misc optimizations
hoist_prodders(graph)
# Symbol definitions
cls._Target.DataManager(**kwargs).process(graph)
# Linearize n-dimensional Indexeds
linearize(graph, **kwargs)
return graph
class Cpu64FsgOperator(Cpu64AdvOperator):
"""
Operator with performance optimizations tailored "For small grids" ("Fsg").
"""
BLOCK_EAGER = False
@classmethod
def _normalize_kwargs(cls, **kwargs):
kwargs = super()._normalize_kwargs(**kwargs)
if kwargs['options']['min-storage']:
raise InvalidOperator('You should not use `min-storage` with `advanced-fsg` '
'as they work in opposite directions')
return kwargs
class Cpu64CustomOperator(Cpu64OperatorMixin, CustomOperator):
_Target = OmpTarget
@classmethod
def _make_dsl_passes_mapper(cls, **kwargs):
return {
'collect-derivs': collect_derivatives,
}
@classmethod
def _make_clusters_passes_mapper(cls, **kwargs):
options = kwargs['options']
platform = kwargs['platform']
sregistry = kwargs['sregistry']
# Callback used by `buffering`; it mimics `is_on_device`, which is used
# on device backends
def callback(f):
if f.is_TimeFunction and f.save is not None:
return f.time_dim
else:
|
return {
'buffering': lambda i: buffering(i, callback, sregistry, options),
'blocking': lambda i: blocking(i, sregistry, options),
'factorize': factorize,
'fission': fission,
'fuse': lambda i: fuse(i, options=options),
'lift': lambda i: Lift().process(cire(i, 'invariants', sregistry,
options, platform)),
'cire-sops': lambda i: cire(i, 'sops', sregistry, options, platform),
'cse': lambda i: cse(i, sregistry, options),
'opt-pows': optimize_pows,
'opt-hyperplanes': optimize_hyperplanes,
'topofuse': lambda i: fuse(i, toposort=True, options=options)
}
@classmethod
def _make_iet_passes_mapper(cls, **kwargs):
options = kwargs['options']
platform = kwargs['platform']
compiler = kwargs['compiler']
sregistry = kwargs['sregistry']
parizer = cls._Target.Parizer(sregistry, options, platform, compiler)
return {
'denormals': avoid_denormals,
'blocking': partial(relax_incr_dimensions, **kwargs),
' | return None | conditional_block |
cpu.py | False), default=16)
# CIRE
o['min-storage'] = oo.pop('min-storage', False)
o['cire-rotate'] = oo.pop('cire-rotate', False)
o['cire-maxpar'] = oo.pop('cire-maxpar', False)
o['cire-ftemps'] = oo.pop('cire-ftemps', False)
o['cire-mingain'] = oo.pop('cire-mingain', cls.CIRE_MINGAIN)
o['cire-schedule'] = oo.pop('cire-schedule', cls.CIRE_SCHEDULE)
# Shared-memory parallelism
o['par-collapse-ncores'] = oo.pop('par-collapse-ncores', cls.PAR_COLLAPSE_NCORES)
o['par-collapse-work'] = oo.pop('par-collapse-work', cls.PAR_COLLAPSE_WORK)
o['par-chunk-nonaffine'] = oo.pop('par-chunk-nonaffine', cls.PAR_CHUNK_NONAFFINE)
o['par-dynamic-work'] = oo.pop('par-dynamic-work', cls.PAR_DYNAMIC_WORK)
o['par-nested'] = oo.pop('par-nested', cls.PAR_NESTED)
# Distributed parallelism
o['dist-drop-unwritten'] = oo.pop('dist-drop-unwritten', cls.DIST_DROP_UNWRITTEN)
# Misc
o['expand'] = oo.pop('expand', cls.EXPAND)
o['optcomms'] = oo.pop('optcomms', True)
o['linearize'] = oo.pop('linearize', False)
o['mapify-reduce'] = oo.pop('mapify-reduce', cls.MAPIFY_REDUCE)
o['index-mode'] = oo.pop('index-mode', cls.INDEX_MODE)
o['place-transfers'] = oo.pop('place-transfers', True)
# Recognised but unused by the CPU backend
oo.pop('par-disabled', None)
oo.pop('gpu-fit', None)
oo.pop('gpu-create', None)
if oo:
raise InvalidOperator("Unrecognized optimization options: [%s]"
% ", ".join(list(oo)))
kwargs['options'].update(o)
return kwargs
# Mode level
class Cpu64NoopOperator(Cpu64OperatorMixin, CoreOperator):
@classmethod
@timed_pass(name='specializing.IET')
def _specialize_iet(cls, graph, **kwargs):
options = kwargs['options']
platform = kwargs['platform']
compiler = kwargs['compiler']
sregistry = kwargs['sregistry']
# Distributed-memory parallelism
mpiize(graph, **kwargs)
# Shared-memory parallelism
if options['openmp']:
parizer = cls._Target.Parizer(sregistry, options, platform, compiler)
parizer.make_parallel(graph)
parizer.initialize(graph, options=options)
# Symbol definitions
cls._Target.DataManager(**kwargs).process(graph)
return graph
class Cpu64AdvOperator(Cpu64OperatorMixin, CoreOperator):
@classmethod
@timed_pass(name='specializing.DSL')
def _specialize_dsl(cls, expressions, **kwargs):
expressions = collect_derivatives(expressions)
return expressions
@classmethod
@timed_pass(name='specializing.Clusters')
def _specialize_clusters(cls, clusters, **kwargs):
options = kwargs['options']
platform = kwargs['platform']
sregistry = kwargs['sregistry']
# Toposort+Fusion (the former to expose more fusion opportunities)
clusters = fuse(clusters, toposort=True)
# Hoist and optimize Dimension-invariant sub-expressions
clusters = cire(clusters, 'invariants', sregistry, options, platform)
clusters = Lift().process(clusters)
# Blocking to improve data locality
if options['blockeager']:
clusters = blocking(clusters, sregistry, options)
# Reduce flops
clusters = cire(clusters, 'sops', sregistry, options, platform)
clusters = factorize(clusters)
clusters = optimize_pows(clusters)
# The previous passes may have created fusion opportunities
clusters = fuse(clusters)
# Reduce flops
clusters = cse(clusters, sregistry, options)
# Blocking to improve data locality
if options['blocklazy']:
clusters = blocking(clusters, sregistry, options)
return clusters
@classmethod
@timed_pass(name='specializing.IET')
def _specialize_iet(cls, graph, **kwargs):
options = kwargs['options']
platform = kwargs['platform']
compiler = kwargs['compiler']
sregistry = kwargs['sregistry']
# Flush denormal numbers
avoid_denormals(graph, platform=platform)
# Distributed-memory parallelism
mpiize(graph, **kwargs)
# Lower BlockDimensions so that blocks of arbitrary shape may be used
relax_incr_dimensions(graph, **kwargs)
# Parallelism
parizer = cls._Target.Parizer(sregistry, options, platform, compiler)
parizer.make_simd(graph)
parizer.make_parallel(graph)
parizer.initialize(graph, options=options)
# Misc optimizations
hoist_prodders(graph)
# Symbol definitions
cls._Target.DataManager(**kwargs).process(graph)
# Linearize n-dimensional Indexeds
linearize(graph, **kwargs)
return graph
class Cpu64FsgOperator(Cpu64AdvOperator):
"""
Operator with performance optimizations tailored "For small grids" ("Fsg").
"""
BLOCK_EAGER = False
@classmethod
def _normalize_kwargs(cls, **kwargs):
kwargs = super()._normalize_kwargs(**kwargs)
if kwargs['options']['min-storage']:
raise InvalidOperator('You should not use `min-storage` with `advanced-fsg` '
'as they work in opposite directions')
return kwargs
class Cpu64CustomOperator(Cpu64OperatorMixin, CustomOperator):
_Target = OmpTarget
@classmethod
def _make_dsl_passes_mapper(cls, **kwargs):
return {
'collect-derivs': collect_derivatives,
}
@classmethod
def _make_clusters_passes_mapper(cls, **kwargs):
options = kwargs['options']
platform = kwargs['platform']
sregistry = kwargs['sregistry']
# Callback used by `buffering`; it mimics `is_on_device`, which is used
# on device backends
def callback(f):
if f.is_TimeFunction and f.save is not None:
return f.time_dim
else:
return None
return {
'buffering': lambda i: buffering(i, callback, sregistry, options),
'blocking': lambda i: blocking(i, sregistry, options),
'factorize': factorize,
'fission': fission,
'fuse': lambda i: fuse(i, options=options),
'lift': lambda i: Lift().process(cire(i, 'invariants', sregistry,
options, platform)),
'cire-sops': lambda i: cire(i, 'sops', sregistry, options, platform),
'cse': lambda i: cse(i, sregistry, options),
'opt-pows': optimize_pows,
'opt-hyperplanes': optimize_hyperplanes,
'topofuse': lambda i: fuse(i, toposort=True, options=options)
}
@classmethod
def _make_iet_passes_mapper(cls, **kwargs):
options = kwargs['options']
platform = kwargs['platform']
compiler = kwargs['compiler']
sregistry = kwargs['sregistry']
parizer = cls._Target.Parizer(sregistry, options, platform, compiler)
return {
'denormals': avoid_denormals,
'blocking': partial(relax_incr_dimensions, **kwargs),
'parallel': parizer.make_parallel,
'openmp': parizer.make_parallel,
'mpi': partial(mpiize, **kwargs),
'linearize': partial(linearize, **kwargs),
'simd': partial(parizer.make_simd),
'prodders': hoist_prodders,
'init': partial(parizer.initialize, options=options)
}
_known_passes = (
# DSL
'collect-derivs',
# Expressions
'buffering',
# Clusters
'blocking', 'topofuse', 'fission', 'fuse', 'factorize', 'cire-sops',
'cse', 'lift', 'opt-pows', 'opt-hyperplanes',
# IET
'denormals', 'openmp', 'mpi', 'linearize', 'simd', 'prodders',
)
_known_passes_disabled = ('tasking', 'streaming', 'openacc')
assert not (set(_known_passes) & set(_known_passes_disabled))
# Language level
class Cpu64NoopCOperator(Cpu64NoopOperator):
_Target = CTarget
class Cpu64NoopOmpOperator(Cpu64NoopOperator):
_Target = OmpTarget
class Cpu64AdvCOperator(Cpu64AdvOperator):
_Target = CTarget
|
class Cpu64AdvOmpOperator(Cpu64AdvOperator):
_Target = OmpTarget
| random_line_split |
|
cpu.py | 'Cpu64AdvOmpOperator', 'Cpu64FsgCOperator', 'Cpu64FsgOmpOperator',
'Cpu64CustomOperator']
class Cpu64OperatorMixin(object):
@classmethod
def _normalize_kwargs(cls, **kwargs):
o = {}
oo = kwargs['options']
# Execution modes
o['openmp'] = oo.pop('openmp')
o['mpi'] = oo.pop('mpi')
o['parallel'] = o['openmp'] # Backwards compatibility
# Buffering
o['buf-async-degree'] = oo.pop('buf-async-degree', None)
# Fusion
o['fuse-tasks'] = oo.pop('fuse-tasks', False)
# CSE
o['cse-min-cost'] = oo.pop('cse-min-cost', cls.CSE_MIN_COST)
# Blocking
o['blockinner'] = oo.pop('blockinner', False)
o['blocklevels'] = oo.pop('blocklevels', cls.BLOCK_LEVELS)
o['blockeager'] = oo.pop('blockeager', cls.BLOCK_EAGER)
o['blocklazy'] = oo.pop('blocklazy', not o['blockeager'])
o['blockrelax'] = oo.pop('blockrelax', cls.BLOCK_RELAX)
o['skewing'] = oo.pop('skewing', False)
o['par-tile'] = ParTile(oo.pop('par-tile', False), default=16)
# CIRE
o['min-storage'] = oo.pop('min-storage', False)
o['cire-rotate'] = oo.pop('cire-rotate', False)
o['cire-maxpar'] = oo.pop('cire-maxpar', False)
o['cire-ftemps'] = oo.pop('cire-ftemps', False)
o['cire-mingain'] = oo.pop('cire-mingain', cls.CIRE_MINGAIN)
o['cire-schedule'] = oo.pop('cire-schedule', cls.CIRE_SCHEDULE)
# Shared-memory parallelism
o['par-collapse-ncores'] = oo.pop('par-collapse-ncores', cls.PAR_COLLAPSE_NCORES)
o['par-collapse-work'] = oo.pop('par-collapse-work', cls.PAR_COLLAPSE_WORK)
o['par-chunk-nonaffine'] = oo.pop('par-chunk-nonaffine', cls.PAR_CHUNK_NONAFFINE)
o['par-dynamic-work'] = oo.pop('par-dynamic-work', cls.PAR_DYNAMIC_WORK)
o['par-nested'] = oo.pop('par-nested', cls.PAR_NESTED)
# Distributed parallelism
o['dist-drop-unwritten'] = oo.pop('dist-drop-unwritten', cls.DIST_DROP_UNWRITTEN)
# Misc
o['expand'] = oo.pop('expand', cls.EXPAND)
o['optcomms'] = oo.pop('optcomms', True)
o['linearize'] = oo.pop('linearize', False)
o['mapify-reduce'] = oo.pop('mapify-reduce', cls.MAPIFY_REDUCE)
o['index-mode'] = oo.pop('index-mode', cls.INDEX_MODE)
o['place-transfers'] = oo.pop('place-transfers', True)
# Recognised but unused by the CPU backend
oo.pop('par-disabled', None)
oo.pop('gpu-fit', None)
oo.pop('gpu-create', None)
if oo:
raise InvalidOperator("Unrecognized optimization options: [%s]"
% ", ".join(list(oo)))
kwargs['options'].update(o)
return kwargs
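# A hedged sketch of how these options are typically supplied (assumed
# user-facing usage, not defined in this module): the dict passed as the
# second element of `opt` is what arrives here as `kwargs['options']`.
# Shapes, names and the chosen option values are illustrative only.
#
#     from devito import Grid, TimeFunction, Eq, Operator
#
#     grid = Grid(shape=(128, 128, 128))
#     u = TimeFunction(name='u', grid=grid, space_order=4)
#     op = Operator(Eq(u.forward, 0.1 * u.laplace),
#                   opt=('advanced', {'blockinner': True,
#                                     'par-tile': (32, 4, 8),
#                                     'cire-mingain': 10}))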
# Mode level
class Cpu64NoopOperator(Cpu64OperatorMixin, CoreOperator):
@classmethod
@timed_pass(name='specializing.IET')
def _specialize_iet(cls, graph, **kwargs):
options = kwargs['options']
platform = kwargs['platform']
compiler = kwargs['compiler']
sregistry = kwargs['sregistry']
# Distributed-memory parallelism
mpiize(graph, **kwargs)
# Shared-memory parallelism
if options['openmp']:
parizer = cls._Target.Parizer(sregistry, options, platform, compiler)
parizer.make_parallel(graph)
parizer.initialize(graph, options=options)
# Symbol definitions
cls._Target.DataManager(**kwargs).process(graph)
return graph
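# Illustrative only (assumed public usage, not defined in this module): the
# noop specialization corresponds to running with optimizations disabled,
# while MPI and OpenMP are still applied above when the corresponding
# execution-mode options are set.
#
#     from devito import Grid, TimeFunction, Eq, Operator
#
#     grid = Grid(shape=(32, 32))
#     u = TimeFunction(name='u', grid=grid)
#     op = Operator(Eq(u.forward, u + 1), opt='noop')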
class Cpu64AdvOperator(Cpu64OperatorMixin, CoreOperator):
@classmethod
@timed_pass(name='specializing.DSL')
def _specialize_dsl(cls, expressions, **kwargs):
expressions = collect_derivatives(expressions)
return expressions
@classmethod
@timed_pass(name='specializing.Clusters')
def _specialize_clusters(cls, clusters, **kwargs):
options = kwargs['options']
platform = kwargs['platform']
sregistry = kwargs['sregistry']
# Toposort+Fusion (the former to expose more fusion opportunities)
clusters = fuse(clusters, toposort=True)
# Hoist and optimize Dimension-invariant sub-expressions
clusters = cire(clusters, 'invariants', sregistry, options, platform)
clusters = Lift().process(clusters)
# Blocking to improve data locality
if options['blockeager']:
clusters = blocking(clusters, sregistry, options)
# Reduce flops
clusters = cire(clusters, 'sops', sregistry, options, platform)
clusters = factorize(clusters)
clusters = optimize_pows(clusters)
# The previous passes may have created fusion opportunities
clusters = fuse(clusters)
# Reduce flops
clusters = cse(clusters, sregistry, options)
# Blocking to improve data locality
if options['blocklazy']:
clusters = blocking(clusters, sregistry, options)
return clusters
@classmethod
@timed_pass(name='specializing.IET')
def _specialize_iet(cls, graph, **kwargs):
options = kwargs['options']
platform = kwargs['platform']
compiler = kwargs['compiler']
sregistry = kwargs['sregistry']
# Flush denormal numbers
avoid_denormals(graph, platform=platform)
# Distributed-memory parallelism
mpiize(graph, **kwargs)
# Lower BlockDimensions so that blocks of arbitrary shape may be used
relax_incr_dimensions(graph, **kwargs)
# Parallelism
parizer = cls._Target.Parizer(sregistry, options, platform, compiler)
parizer.make_simd(graph)
parizer.make_parallel(graph)
parizer.initialize(graph, options=options)
# Misc optimizations
hoist_prodders(graph)
# Symbol definitions
cls._Target.DataManager(**kwargs).process(graph)
# Linearize n-dimensional Indexeds
linearize(graph, **kwargs)
return graph
class Cpu64FsgOperator(Cpu64AdvOperator):
"""
    Operator with performance optimizations tailored to small grids ("Fsg" stands for "For small grids").
"""
BLOCK_EAGER = False
@classmethod
def _normalize_kwargs(cls, **kwargs):
kwargs = super()._normalize_kwargs(**kwargs)
if kwargs['options']['min-storage']:
            raise InvalidOperator('You should not use `min-storage` with `advanced-fsg`, '
                                  'as they work in opposite directions')
return kwargs
class Cpu64CustomOperator(Cpu64OperatorMixin, CustomOperator):
_Target = OmpTarget
@classmethod
def _make_dsl_passes_mapper(cls, **kwargs):
return {
'collect-derivs': collect_derivatives,
}
@classmethod
def _make_clusters_passes_mapper(cls, **kwargs):
options = kwargs['options']
platform = kwargs['platform']
sregistry = kwargs['sregistry']
# Callback used by `buffering`; it mimics `is_on_device`, which is used
# on device backends
def | (f):
if f.is_TimeFunction and f.save is not None:
return f.time_dim
else:
return None
return {
'buffering': lambda i: buffering(i, callback, sregistry, options),
'blocking': lambda i: blocking(i, sregistry, options),
'factorize': factorize,
'fission': fission,
'fuse': lambda i: fuse(i, options=options),
'lift': lambda i: Lift().process(cire(i, 'invariants', sregistry,
options, platform)),
'cire-sops': lambda i: cire(i, 'sops', sregistry, options, platform),
'cse': lambda i: cse(i, sregistry, options),
'opt-pows': optimize_pows,
'opt-hyperplanes': optimize_hyperplanes,
'topofuse': lambda i: fuse(i, toposort=True, options=options)
}
@classmethod
def _make_iet_passes_mapper(cls, **kwargs):
options = kwargs['options']
platform = kwargs['platform']
compiler = kwargs['compiler']
sregistry = kwargs['sregistry']
parizer = cls._Target.Parizer(sregistry, options, platform, compiler)
return {
'denormals': avoid_denormals,
'blocking': partial(relax_incr_dimensions, **kwargs),
| callback | identifier_name |
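A small illustrative sketch (assumed helper, not part of the dataset): each row above can
be put back together by concatenating its prefix, middle and suffix columns, which is how
the `identifier_name` row recovers `def callback(f):`. The dict literal below is a
hand-written stand-in for a parsed row.

    def reassemble(row):
        """Rebuild the original source snippet from a FIM row."""
        return row['prefix'] + row['middle'] + row['suffix']

    row = {'file_name': 'cpu.py',
           'prefix': '        def ',
           'middle': 'callback',
           'suffix': '(f):\n            ...',
           'fim_type': 'identifier_name'}
    print(reassemble(row))  # prints "        def callback(f):" followed by its body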