createConstructor.ts

// Builds the AST for the assignment statement `this.<propertyName> = <right>;`.
function getExpressionStatement(propertyName: string, right: object) {
return {
type: 'ExpressionStatement',
expression: {
type: 'AssignmentExpression',
operator: '=',
left: {
type: 'MemberExpression',
object: {
type: 'ThisExpression',
},
property: {
type: 'Identifier',
name: propertyName,
},
},
right,
},
};
}
// Builds the AST for `this.<propertyName> = [<right>];`.
function getArrayExpressionStatement(propertyName: string, right: object) {
return {
type: 'ExpressionStatement',
expression: {
type: 'AssignmentExpression',
operator: '=',
left: {
type: 'MemberExpression',
object: {
type: 'ThisExpression',
},
property: {
type: 'Identifier',
name: propertyName,
},
},
right: {
type: 'ArrayExpression',
elements: [right],
},
},
};
}
// Picks a default initializer for a property from its type name: numeric and
// string literal types use the literal itself, the primitive keywords map to
// '', 0, or false, and anything else is assumed to be a class and gets
// `new <Type>()`.
function getExpressionStatementForProperty(propertyName: string, propertyTypeName: string | number) {
const numericValue = typeof propertyTypeName === 'string' ? parseFloat(propertyTypeName) : propertyTypeName;
if (!isNaN(numericValue)) {
return getExpressionStatement(propertyName, {
type: 'NumericLiteral',
value: numericValue,
});
} else if (typeof propertyTypeName === 'string' && propertyTypeName[0] === "'") {
return getExpressionStatement(propertyName, {
type: 'StringLiteral',
value: propertyTypeName.slice(1, -1),
});
} else if (propertyTypeName === 'TSStringKeyword') {
return getExpressionStatement(propertyName, {
type: 'StringLiteral',
value: '',
});
} else if (propertyTypeName === 'TSNumberKeyword') {
return getExpressionStatement(propertyName, {
type: 'NumericLiteral',
value: 0,
});
} else if (propertyTypeName === 'TSBooleanKeyword') {
return getExpressionStatement(propertyName, {
type: 'BooleanLiteral',
value: false,
});
} else {
return getExpressionStatement(propertyName, {
type: 'NewExpression',
callee: {
type: 'Identifier',
name: propertyTypeName,
},
});
}
}
// Array variant of the above: same default-value rules, but the value is
// wrapped in a one-element array.
function getExpressionStatementForArrayProperty(propertyName: string, propertyTypeName: string | number) {
const numericValue = typeof propertyTypeName === 'string' ? parseFloat(propertyTypeName) : propertyTypeName;
if (!isNaN(numericValue)) {
return getArrayExpressionStatement(propertyName, {
type: 'NumericLiteral',
value: numericValue,
});
} else if (typeof propertyTypeName === 'string' && propertyTypeName[0] === "'") {
return getArrayExpressionStatement(propertyName, {
type: 'StringLiteral',
value: propertyTypeName.slice(1, -1),
});
} else if (propertyTypeName === 'TSStringKeyword') {
return getArrayExpressionStatement(propertyName, {
type: 'StringLiteral',
value: '',
});
} else if (propertyTypeName === 'TSNumberKeyword') {
return getArrayExpressionStatement(propertyName, {
type: 'NumericLiteral',
value: 0,
});
} else if (propertyTypeName === 'TSBooleanKeyword') {
return getArrayExpressionStatement(propertyName, {
type: 'BooleanLiteral',
value: false,
});
} else {
return getArrayExpressionStatement(propertyName, {
type: 'NewExpression',
callee: {
type: 'Identifier',
name: propertyTypeName,
},
});
}
}
// Builds a `ClassMethod` AST node for a constructor that initializes every
// property in the map, prefixed with a `super()` call when the class has a
// superclass.
export default function createConstructor(
propertyNameToTypeNameMap: { [key: string]: string | number },
hasSuperClass: boolean
) {
const initializers = Object.entries(propertyNameToTypeNameMap).map(([propertyName, propertyTypeName]) => {
if (typeof propertyTypeName === 'string' && propertyTypeName.endsWith('[]')) {
return getExpressionStatementForArrayProperty(propertyName, propertyTypeName.slice(0, -2));
} else {
return getExpressionStatementForProperty(propertyName, propertyTypeName);
}
});
const superCall = {
type: 'ExpressionStatement',
expression: {
type: 'CallExpression',
callee: {
type: 'Super',
},
},
};
return {
type: 'ClassMethod',
kind: 'constructor',
key: {
type: 'Identifier',
name: 'constructor',
},
computed: false,
id: null,
generator: false,
async: false,
params: [],
static: false,
body: {
type: 'BlockStatement',
body: [...(hasSuperClass ? [superCall] : []), ...initializers],
},
};
}
models.rs

#![doc = "generated by AutoRust 0.1.0"]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RestorePointCollectionSourceProperties {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub location: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RestorePointCollectionProperties {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub source: Option<RestorePointCollectionSourceProperties>,
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<String>,
#[serde(rename = "restorePointCollectionId", default, skip_serializing_if = "Option::is_none")]
pub restore_point_collection_id: Option<String>,
#[serde(rename = "restorePoints", default, skip_serializing_if = "Vec::is_empty")]
pub restore_points: Vec<RestorePoint>,
}
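// A minimal serialization sketch (added for illustration; not part of the
// generated file). `skip_serializing_if` drops `None` and empty-`Vec` fields
// from the output, and `rename` restores the camelCase wire names. Assumes
// `serde_json` is a dependency, as it already is for the `serde_json::Value`
// fields further down.
#[cfg(test)]
mod serialization_sketch {
    use super::*;
    #[test]
    fn omits_defaulted_fields() {
        let props = RestorePointCollectionProperties {
            source: Some(RestorePointCollectionSourceProperties {
                location: Some("eastus".to_string()),
                id: None, // skipped: Option::is_none
            }),
            provisioning_state: None,          // skipped
            restore_point_collection_id: None, // skipped
            restore_points: Vec::new(),        // skipped: Vec::is_empty
        };
        let json = serde_json::to_string(&props).unwrap();
        assert_eq!(json, r#"{"source":{"location":"eastus"}}"#);
    }
}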
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RestorePointCollection {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<RestorePointCollectionProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RestorePointCollectionUpdate {
#[serde(flatten)]
pub update_resource: UpdateResource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<RestorePointCollectionProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RestorePointCollectionListResult {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<RestorePointCollection>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RestorePointSourceMetadata {
#[serde(rename = "hardwareProfile", default, skip_serializing_if = "Option::is_none")]
pub hardware_profile: Option<HardwareProfile>,
#[serde(rename = "storageProfile", default, skip_serializing_if = "Option::is_none")]
pub storage_profile: Option<RestorePointSourceVmStorageProfile>,
#[serde(rename = "osProfile", default, skip_serializing_if = "Option::is_none")]
pub os_profile: Option<OsProfile>,
#[serde(rename = "diagnosticsProfile", default, skip_serializing_if = "Option::is_none")]
pub diagnostics_profile: Option<DiagnosticsProfile>,
#[serde(rename = "licenseType", default, skip_serializing_if = "Option::is_none")]
pub license_type: Option<String>,
#[serde(rename = "vmId", default, skip_serializing_if = "Option::is_none")]
pub vm_id: Option<String>,
#[serde(rename = "securityProfile", default, skip_serializing_if = "Option::is_none")]
pub security_profile: Option<SecurityProfile>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub location: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RestorePointSourceVmStorageProfile {
#[serde(rename = "osDisk", default, skip_serializing_if = "Option::is_none")]
pub os_disk: Option<RestorePointSourceVmosDisk>,
#[serde(rename = "dataDisks", default, skip_serializing_if = "Vec::is_empty")]
pub data_disks: Vec<RestorePointSourceVmDataDisk>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RestorePointSourceVmosDisk {
#[serde(rename = "osType", default, skip_serializing_if = "Option::is_none")]
pub os_type: Option<restore_point_source_vmos_disk::OsType>,
#[serde(rename = "encryptionSettings", default, skip_serializing_if = "Option::is_none")]
pub encryption_settings: Option<DiskEncryptionSettings>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub caching: Option<Caching>,
#[serde(rename = "diskSizeGB", default, skip_serializing_if = "Option::is_none")]
pub disk_size_gb: Option<i32>,
#[serde(rename = "managedDisk", default, skip_serializing_if = "Option::is_none")]
pub managed_disk: Option<ManagedDiskParameters>,
#[serde(rename = "diskRestorePoint", default, skip_serializing_if = "Option::is_none")]
pub disk_restore_point: Option<ApiEntityReference>,
}
pub mod restore_point_source_vmos_disk {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum OsType {
Windows,
Linux,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RestorePointSourceVmDataDisk {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub lun: Option<i32>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub caching: Option<Caching>,
#[serde(rename = "diskSizeGB", default, skip_serializing_if = "Option::is_none")]
pub disk_size_gb: Option<i32>,
#[serde(rename = "managedDisk", default, skip_serializing_if = "Option::is_none")]
pub managed_disk: Option<ManagedDiskParameters>,
#[serde(rename = "diskRestorePoint", default, skip_serializing_if = "Option::is_none")]
pub disk_restore_point: Option<ApiEntityReference>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RestorePointProvisioningDetails {
#[serde(rename = "creationTime", default, skip_serializing_if = "Option::is_none")]
pub creation_time: Option<String>,
#[serde(rename = "totalUsedSizeInBytes", default, skip_serializing_if = "Option::is_none")]
pub total_used_size_in_bytes: Option<i64>,
#[serde(rename = "statusCode", default, skip_serializing_if = "Option::is_none")]
pub status_code: Option<i32>,
#[serde(rename = "statusMessage", default, skip_serializing_if = "Option::is_none")]
pub status_message: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RestorePoint {
#[serde(flatten)]
pub proxy_resource: ProxyResource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<RestorePointProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RestorePointProperties {
#[serde(rename = "excludeDisks", default, skip_serializing_if = "Vec::is_empty")]
pub exclude_disks: Vec<ApiEntityReference>,
#[serde(rename = "sourceMetadata", default, skip_serializing_if = "Option::is_none")]
pub source_metadata: Option<RestorePointSourceMetadata>,
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<String>,
#[serde(rename = "consistencyMode", default, skip_serializing_if = "Option::is_none")]
pub consistency_mode: Option<restore_point_properties::ConsistencyMode>,
#[serde(rename = "provisioningDetails", default, skip_serializing_if = "Option::is_none")]
pub provisioning_details: Option<RestorePointProvisioningDetails>,
}
pub mod restore_point_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ConsistencyMode {
CrashConsistent,
FileSystemConsistent,
ApplicationConsistent,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ComputeOperationListResult {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<ComputeOperationValue>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ComputeOperationValue {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub origin: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub display: Option<ComputeOperationValueDisplay>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ComputeOperationValueDisplay {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub operation: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub resource: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub provider: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DisallowedConfiguration {
#[serde(rename = "vmDiskType", default, skip_serializing_if = "Option::is_none")]
pub vm_disk_type: Option<disallowed_configuration::VmDiskType>,
}
pub mod disallowed_configuration {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum VmDiskType {
None,
Unmanaged,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RetrieveBootDiagnosticsDataResult {
#[serde(rename = "consoleScreenshotBlobUri", default, skip_serializing_if = "Option::is_none")]
pub console_screenshot_blob_uri: Option<String>,
#[serde(rename = "serialConsoleLogBlobUri", default, skip_serializing_if = "Option::is_none")]
pub serial_console_log_blob_uri: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum HyperVGenerationType {
V1,
V2,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct InstanceViewStatus {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub code: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub level: Option<instance_view_status::Level>,
#[serde(rename = "displayStatus", default, skip_serializing_if = "Option::is_none")]
pub display_status: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub message: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub time: Option<String>,
}
pub mod instance_view_status {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Level {
Info,
Warning,
Error,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AvailabilitySetProperties {
#[serde(rename = "platformUpdateDomainCount", default, skip_serializing_if = "Option::is_none")]
pub platform_update_domain_count: Option<i32>,
#[serde(rename = "platformFaultDomainCount", default, skip_serializing_if = "Option::is_none")]
pub platform_fault_domain_count: Option<i32>,
#[serde(rename = "virtualMachines", default, skip_serializing_if = "Vec::is_empty")]
pub virtual_machines: Vec<SubResource>,
#[serde(rename = "proximityPlacementGroup", default, skip_serializing_if = "Option::is_none")]
pub proximity_placement_group: Option<SubResource>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub statuses: Vec<InstanceViewStatus>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum AvailabilitySetSkuType {
Classic,
Aligned,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AvailabilitySet {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<AvailabilitySetProperties>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub sku: Option<Sku>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AvailabilitySetUpdate {
#[serde(flatten)]
pub update_resource: UpdateResource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<AvailabilitySetProperties>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub sku: Option<Sku>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AvailabilitySetListResult {
pub value: Vec<AvailabilitySet>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SubResourceWithColocationStatus {
#[serde(flatten)]
pub sub_resource: SubResource,
#[serde(rename = "colocationStatus", default, skip_serializing_if = "Option::is_none")]
pub colocation_status: Option<InstanceViewStatus>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ProximityPlacementGroupProperties {
#[serde(rename = "proximityPlacementGroupType", default, skip_serializing_if = "Option::is_none")]
pub proximity_placement_group_type: Option<proximity_placement_group_properties::ProximityPlacementGroupType>,
#[serde(rename = "virtualMachines", default, skip_serializing_if = "Vec::is_empty")]
pub virtual_machines: Vec<SubResourceWithColocationStatus>,
#[serde(rename = "virtualMachineScaleSets", default, skip_serializing_if = "Vec::is_empty")]
pub virtual_machine_scale_sets: Vec<SubResourceWithColocationStatus>,
#[serde(rename = "availabilitySets", default, skip_serializing_if = "Vec::is_empty")]
pub availability_sets: Vec<SubResourceWithColocationStatus>,
#[serde(rename = "colocationStatus", default, skip_serializing_if = "Option::is_none")]
pub colocation_status: Option<InstanceViewStatus>,
}
pub mod proximity_placement_group_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ProximityPlacementGroupType {
Standard,
Ultra,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ProximityPlacementGroup {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<ProximityPlacementGroupProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ProximityPlacementGroupUpdate {
#[serde(flatten)]
pub update_resource: UpdateResource,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ProximityPlacementGroupListResult {
pub value: Vec<ProximityPlacementGroup>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DedicatedHostGroupInstanceView {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub hosts: Vec<DedicatedHostInstanceViewWithName>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DedicatedHostGroupProperties {
#[serde(rename = "platformFaultDomainCount")]
pub platform_fault_domain_count: i32,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub hosts: Vec<SubResourceReadOnly>,
#[serde(rename = "instanceView", default, skip_serializing_if = "Option::is_none")]
pub instance_view: Option<DedicatedHostGroupInstanceView>,
#[serde(rename = "supportAutomaticPlacement", default, skip_serializing_if = "Option::is_none")]
pub support_automatic_placement: Option<bool>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DedicatedHostGroup {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<DedicatedHostGroupProperties>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub zones: Vec<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DedicatedHostGroupUpdate {
#[serde(flatten)]
pub update_resource: UpdateResource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<DedicatedHostGroupProperties>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub zones: Vec<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DedicatedHostGroupListResult {
pub value: Vec<DedicatedHostGroup>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum DedicatedHostLicenseType {
None,
#[serde(rename = "Windows_Server_Hybrid")]
WindowsServerHybrid,
#[serde(rename = "Windows_Server_Perpetual")]
WindowsServerPerpetual,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DedicatedHostAllocatableVm {
#[serde(rename = "vmSize", default, skip_serializing_if = "Option::is_none")]
pub vm_size: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub count: Option<f64>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DedicatedHostAvailableCapacity {
#[serde(rename = "allocatableVMs", default, skip_serializing_if = "Vec::is_empty")]
pub allocatable_v_ms: Vec<DedicatedHostAllocatableVm>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DedicatedHostInstanceView {
#[serde(rename = "assetId", default, skip_serializing_if = "Option::is_none")]
pub asset_id: Option<String>,
#[serde(rename = "availableCapacity", default, skip_serializing_if = "Option::is_none")]
pub available_capacity: Option<DedicatedHostAvailableCapacity>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub statuses: Vec<InstanceViewStatus>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DedicatedHostInstanceViewWithName {
#[serde(flatten)]
pub dedicated_host_instance_view: DedicatedHostInstanceView,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DedicatedHostProperties {
#[serde(rename = "platformFaultDomain", default, skip_serializing_if = "Option::is_none")]
pub platform_fault_domain: Option<i32>,
#[serde(rename = "autoReplaceOnFailure", default, skip_serializing_if = "Option::is_none")]
pub auto_replace_on_failure: Option<bool>,
#[serde(rename = "hostId", default, skip_serializing_if = "Option::is_none")]
pub host_id: Option<String>,
#[serde(rename = "virtualMachines", default, skip_serializing_if = "Vec::is_empty")]
pub virtual_machines: Vec<SubResourceReadOnly>,
#[serde(rename = "licenseType", default, skip_serializing_if = "Option::is_none")]
pub license_type: Option<DedicatedHostLicenseType>,
#[serde(rename = "provisioningTime", default, skip_serializing_if = "Option::is_none")]
pub provisioning_time: Option<String>,
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<String>,
#[serde(rename = "instanceView", default, skip_serializing_if = "Option::is_none")]
pub instance_view: Option<DedicatedHostInstanceView>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DedicatedHost {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<DedicatedHostProperties>,
pub sku: Sku,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DedicatedHostUpdate {
#[serde(flatten)]
pub update_resource: UpdateResource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<DedicatedHostProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DedicatedHostListResult {
pub value: Vec<DedicatedHost>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CapacityReservationGroupInstanceView {
#[serde(rename = "capacityReservations", default, skip_serializing_if = "Vec::is_empty")]
pub capacity_reservations: Vec<CapacityReservationInstanceViewWithName>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CapacityReservationGroupProperties {
#[serde(rename = "capacityReservations", default, skip_serializing_if = "Vec::is_empty")]
pub capacity_reservations: Vec<SubResourceReadOnly>,
#[serde(rename = "virtualMachinesAssociated", default, skip_serializing_if = "Vec::is_empty")]
pub virtual_machines_associated: Vec<SubResourceReadOnly>,
#[serde(rename = "instanceView", default, skip_serializing_if = "Option::is_none")]
pub instance_view: Option<CapacityReservationGroupInstanceView>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CapacityReservationGroup {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<CapacityReservationGroupProperties>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub zones: Vec<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CapacityReservationGroupUpdate {
#[serde(flatten)]
pub update_resource: UpdateResource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<CapacityReservationGroupProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CapacityReservationGroupListResult {
pub value: Vec<CapacityReservationGroup>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CapacityReservationInstanceView {
#[serde(rename = "utilizationInfo", default, skip_serializing_if = "Option::is_none")]
pub utilization_info: Option<CapacityReservationUtilization>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub statuses: Vec<InstanceViewStatus>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CapacityReservationUtilization {
#[serde(rename = "virtualMachinesAllocated", default, skip_serializing_if = "Vec::is_empty")]
pub virtual_machines_allocated: Vec<SubResourceReadOnly>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CapacityReservationInstanceViewWithName {
#[serde(flatten)]
pub capacity_reservation_instance_view: CapacityReservationInstanceView,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CapacityReservationProperties {
#[serde(rename = "reservationId", default, skip_serializing_if = "Option::is_none")]
pub reservation_id: Option<String>,
#[serde(rename = "virtualMachinesAssociated", default, skip_serializing_if = "Vec::is_empty")]
pub virtual_machines_associated: Vec<SubResourceReadOnly>,
#[serde(rename = "provisioningTime", default, skip_serializing_if = "Option::is_none")]
pub provisioning_time: Option<String>,
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<String>,
#[serde(rename = "instanceView", default, skip_serializing_if = "Option::is_none")]
pub instance_view: Option<CapacityReservationInstanceView>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CapacityReservation {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<CapacityReservationProperties>,
pub sku: Sku,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub zones: Vec<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CapacityReservationUpdate {
#[serde(flatten)]
pub update_resource: UpdateResource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<CapacityReservationProperties>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub sku: Option<Sku>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CapacityReservationListResult {
pub value: Vec<CapacityReservation>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SshPublicKeyGenerateKeyPairResult {
#[serde(rename = "privateKey")]
pub private_key: String,
#[serde(rename = "publicKey")]
pub public_key: String,
pub id: String,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SshPublicKeyResourceProperties {
#[serde(rename = "publicKey", default, skip_serializing_if = "Option::is_none")]
pub public_key: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SshPublicKeyResource {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<SshPublicKeyResourceProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SshPublicKeyUpdateResource {
#[serde(flatten)]
pub update_resource: UpdateResource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<SshPublicKeyResourceProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SshPublicKeysGroupListResult {
pub value: Vec<SshPublicKeyResource>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineSize {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "numberOfCores", default, skip_serializing_if = "Option::is_none")]
pub number_of_cores: Option<i32>,
#[serde(rename = "osDiskSizeInMB", default, skip_serializing_if = "Option::is_none")]
pub os_disk_size_in_mb: Option<i32>,
#[serde(rename = "resourceDiskSizeInMB", default, skip_serializing_if = "Option::is_none")]
pub resource_disk_size_in_mb: Option<i32>,
#[serde(rename = "memoryInMB", default, skip_serializing_if = "Option::is_none")]
pub memory_in_mb: Option<i32>,
#[serde(rename = "maxDataDiskCount", default, skip_serializing_if = "Option::is_none")]
pub max_data_disk_count: Option<i32>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineSizeListResult {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<VirtualMachineSize>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineExtensionImageProperties {
#[serde(rename = "operatingSystem")]
pub operating_system: String,
#[serde(rename = "computeRole")]
pub compute_role: String,
#[serde(rename = "handlerSchema")]
pub handler_schema: String,
#[serde(rename = "vmScaleSetEnabled", default, skip_serializing_if = "Option::is_none")]
pub vm_scale_set_enabled: Option<bool>,
#[serde(rename = "supportsMultipleExtensions", default, skip_serializing_if = "Option::is_none")]
pub supports_multiple_extensions: Option<bool>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineExtensionImage {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<VirtualMachineExtensionImageProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineImageResource {
#[serde(flatten)]
pub sub_resource: SubResource,
pub name: String,
pub location: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tags: Option<serde_json::Value>,
#[serde(rename = "extendedLocation", default, skip_serializing_if = "Option::is_none")]
pub extended_location: Option<ExtendedLocation>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineExtensionInstanceView {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(rename = "typeHandlerVersion", default, skip_serializing_if = "Option::is_none")]
pub type_handler_version: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub substatuses: Vec<InstanceViewStatus>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub statuses: Vec<InstanceViewStatus>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineExtensionProperties {
#[serde(rename = "forceUpdateTag", default, skip_serializing_if = "Option::is_none")]
pub force_update_tag: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub publisher: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(rename = "typeHandlerVersion", default, skip_serializing_if = "Option::is_none")]
pub type_handler_version: Option<String>,
#[serde(rename = "autoUpgradeMinorVersion", default, skip_serializing_if = "Option::is_none")]
pub auto_upgrade_minor_version: Option<bool>,
#[serde(rename = "enableAutomaticUpgrade", default, skip_serializing_if = "Option::is_none")]
pub enable_automatic_upgrade: Option<bool>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub settings: Option<serde_json::Value>,
#[serde(rename = "protectedSettings", default, skip_serializing_if = "Option::is_none")]
pub protected_settings: Option<serde_json::Value>,
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<String>,
#[serde(rename = "instanceView", default, skip_serializing_if = "Option::is_none")]
pub instance_view: Option<VirtualMachineExtensionInstanceView>,
#[serde(rename = "suppressFailures", default, skip_serializing_if = "Option::is_none")]
pub suppress_failures: Option<bool>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineExtensionUpdateProperties {
#[serde(rename = "forceUpdateTag", default, skip_serializing_if = "Option::is_none")]
pub force_update_tag: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub publisher: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(rename = "typeHandlerVersion", default, skip_serializing_if = "Option::is_none")]
pub type_handler_version: Option<String>,
#[serde(rename = "autoUpgradeMinorVersion", default, skip_serializing_if = "Option::is_none")]
pub auto_upgrade_minor_version: Option<bool>,
#[serde(rename = "enableAutomaticUpgrade", default, skip_serializing_if = "Option::is_none")]
pub enable_automatic_upgrade: Option<bool>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub settings: Option<serde_json::Value>,
#[serde(rename = "protectedSettings", default, skip_serializing_if = "Option::is_none")]
pub protected_settings: Option<serde_json::Value>,
#[serde(rename = "suppressFailures", default, skip_serializing_if = "Option::is_none")]
pub suppress_failures: Option<bool>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineExtension {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<VirtualMachineExtensionProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetVmExtension {
#[serde(flatten)]
pub sub_resource_read_only: SubResourceReadOnly,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<VirtualMachineExtensionProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineExtensionUpdate {
#[serde(flatten)]
pub update_resource: UpdateResource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<VirtualMachineExtensionUpdateProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetVmExtensionUpdate {
#[serde(flatten)]
pub sub_resource_read_only: SubResourceReadOnly,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<VirtualMachineExtensionUpdateProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineExtensionsListResult {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<VirtualMachineExtension>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetVmExtensionsListResult {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<VirtualMachineScaleSetVmExtension>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineSoftwarePatchProperties {
#[serde(rename = "patchId", default, skip_serializing_if = "Option::is_none")]
pub patch_id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub version: Option<String>,
#[serde(rename = "kbId", default, skip_serializing_if = "Option::is_none")]
pub kb_id: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub classifications: Vec<String>,
#[serde(rename = "rebootBehavior", default, skip_serializing_if = "Option::is_none")]
pub reboot_behavior: Option<virtual_machine_software_patch_properties::RebootBehavior>,
#[serde(rename = "activityId", default, skip_serializing_if = "Option::is_none")]
pub activity_id: Option<String>,
#[serde(rename = "publishedDate", default, skip_serializing_if = "Option::is_none")]
pub published_date: Option<String>,
#[serde(rename = "lastModifiedDateTime", default, skip_serializing_if = "Option::is_none")]
pub last_modified_date_time: Option<String>,
#[serde(rename = "assessmentState", default, skip_serializing_if = "Option::is_none")]
pub assessment_state: Option<virtual_machine_software_patch_properties::AssessmentState>,
}
pub mod virtual_machine_software_patch_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum RebootBehavior {
Unknown,
NeverReboots,
AlwaysRequiresReboot,
CanRequestReboot,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum AssessmentState {
Unknown,
Available,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineAssessPatchesResult {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub status: Option<virtual_machine_assess_patches_result::Status>,
#[serde(rename = "assessmentActivityId", default, skip_serializing_if = "Option::is_none")]
pub assessment_activity_id: Option<String>,
#[serde(rename = "rebootPending", default, skip_serializing_if = "Option::is_none")]
pub reboot_pending: Option<bool>,
#[serde(rename = "criticalAndSecurityPatchCount", default, skip_serializing_if = "Option::is_none")]
pub critical_and_security_patch_count: Option<i32>,
#[serde(rename = "otherPatchCount", default, skip_serializing_if = "Option::is_none")]
pub other_patch_count: Option<i32>,
#[serde(rename = "startDateTime", default, skip_serializing_if = "Option::is_none")]
pub start_date_time: Option<String>,
#[serde(rename = "availablePatches", default, skip_serializing_if = "Vec::is_empty")]
pub available_patches: Vec<VirtualMachineSoftwarePatchProperties>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub error: Option<ApiError>,
}
pub mod virtual_machine_assess_patches_result {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Status {
Unknown,
InProgress,
Failed,
Succeeded,
CompletedWithWarnings,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineInstallPatchesParameters {
#[serde(rename = "maximumDuration", default, skip_serializing_if = "Option::is_none")]
pub maximum_duration: Option<String>,
#[serde(rename = "rebootSetting")]
pub reboot_setting: virtual_machine_install_patches_parameters::RebootSetting,
#[serde(rename = "windowsParameters", default, skip_serializing_if = "Option::is_none")]
pub windows_parameters: Option<WindowsParameters>,
#[serde(rename = "linuxParameters", default, skip_serializing_if = "Option::is_none")]
pub linux_parameters: Option<LinuxParameters>,
}
pub mod virtual_machine_install_patches_parameters {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum RebootSetting {
IfRequired,
Never,
Always,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WindowsParameters {
#[serde(rename = "classificationsToInclude", default, skip_serializing_if = "Vec::is_empty")]
pub classifications_to_include: Vec<String>,
#[serde(rename = "kbNumbersToInclude", default, skip_serializing_if = "Vec::is_empty")]
pub kb_numbers_to_include: Vec<String>,
#[serde(rename = "kbNumbersToExclude", default, skip_serializing_if = "Vec::is_empty")]
pub kb_numbers_to_exclude: Vec<String>,
#[serde(rename = "excludeKbsRequiringReboot", default, skip_serializing_if = "Option::is_none")]
pub exclude_kbs_requiring_reboot: Option<bool>,
#[serde(rename = "maxPatchPublishDate", default, skip_serializing_if = "Option::is_none")]
pub max_patch_publish_date: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LinuxParameters {
#[serde(rename = "classificationsToInclude", default, skip_serializing_if = "Vec::is_empty")]
pub classifications_to_include: Vec<String>,
#[serde(rename = "packageNameMasksToInclude", default, skip_serializing_if = "Vec::is_empty")]
pub package_name_masks_to_include: Vec<String>,
#[serde(rename = "packageNameMasksToExclude", default, skip_serializing_if = "Vec::is_empty")]
pub package_name_masks_to_exclude: Vec<String>,
#[serde(rename = "maintenanceRunId", default, skip_serializing_if = "Option::is_none")]
pub maintenance_run_id: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineInstallPatchesResult {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub status: Option<virtual_machine_install_patches_result::Status>,
#[serde(rename = "installationActivityId", default, skip_serializing_if = "Option::is_none")]
pub installation_activity_id: Option<String>,
#[serde(rename = "rebootStatus", default, skip_serializing_if = "Option::is_none")]
pub reboot_status: Option<virtual_machine_install_patches_result::RebootStatus>,
#[serde(rename = "maintenanceWindowExceeded", default, skip_serializing_if = "Option::is_none")]
pub maintenance_window_exceeded: Option<bool>,
#[serde(rename = "excludedPatchCount", default, skip_serializing_if = "Option::is_none")]
pub excluded_patch_count: Option<i32>,
#[serde(rename = "notSelectedPatchCount", default, skip_serializing_if = "Option::is_none")]
pub not_selected_patch_count: Option<i32>,
#[serde(rename = "pendingPatchCount", default, skip_serializing_if = "Option::is_none")]
pub pending_patch_count: Option<i32>,
#[serde(rename = "installedPatchCount", default, skip_serializing_if = "Option::is_none")]
pub installed_patch_count: Option<i32>,
#[serde(rename = "failedPatchCount", default, skip_serializing_if = "Option::is_none")]
pub failed_patch_count: Option<i32>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub patches: Vec<PatchInstallationDetail>,
#[serde(rename = "startDateTime", default, skip_serializing_if = "Option::is_none")]
pub start_date_time: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub error: Option<ApiError>,
}
pub mod virtual_machine_install_patches_result {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Status {
Unknown,
InProgress,
Failed,
Succeeded,
CompletedWithWarnings,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum RebootStatus {
Unknown,
NotNeeded,
Required,
Started,
Failed,
Completed,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PatchInstallationDetail {
#[serde(rename = "patchId", default, skip_serializing_if = "Option::is_none")]
pub patch_id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub version: Option<String>,
#[serde(rename = "kbId", default, skip_serializing_if = "Option::is_none")]
pub kb_id: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub classifications: Vec<String>,
#[serde(rename = "installationState", default, skip_serializing_if = "Option::is_none")]
pub installation_state: Option<patch_installation_detail::InstallationState>,
}
pub mod patch_installation_detail {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum InstallationState {
Unknown,
Installed,
Failed,
Excluded,
NotSelected,
Pending,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PurchasePlan {
pub publisher: String,
pub name: String,
pub product: String,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OsDiskImage {
#[serde(rename = "operatingSystem")]
pub operating_system: os_disk_image::OperatingSystem,
}
pub mod os_disk_image {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum OperatingSystem {
Windows,
Linux,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataDiskImage {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub lun: Option<i32>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AutomaticOsUpgradeProperties {
#[serde(rename = "automaticOSUpgradeSupported")]
pub automatic_os_upgrade_supported: bool,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineImageFeature {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub value: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineImageProperties {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub plan: Option<PurchasePlan>,
#[serde(rename = "osDiskImage", default, skip_serializing_if = "Option::is_none")]
pub os_disk_image: Option<OsDiskImage>,
#[serde(rename = "dataDiskImages", default, skip_serializing_if = "Vec::is_empty")]
pub data_disk_images: Vec<DataDiskImage>,
#[serde(rename = "automaticOSUpgradeProperties", default, skip_serializing_if = "Option::is_none")]
pub automatic_os_upgrade_properties: Option<AutomaticOsUpgradeProperties>,
#[serde(rename = "hyperVGeneration", default, skip_serializing_if = "Option::is_none")]
pub hyper_v_generation: Option<HyperVGenerationType>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub disallowed: Option<DisallowedConfiguration>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub features: Vec<VirtualMachineImageFeature>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineImage {
#[serde(flatten)]
pub virtual_machine_image_resource: VirtualMachineImageResource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<VirtualMachineImageProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UsageName {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub value: Option<String>,
#[serde(rename = "localizedValue", default, skip_serializing_if = "Option::is_none")]
pub localized_value: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Usage {
pub unit: usage::Unit,
#[serde(rename = "currentValue")]
pub current_value: i32,
pub limit: i64,
pub name: UsageName,
}
pub mod usage {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Unit {
Count,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ListUsagesResult {
pub value: Vec<Usage>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineReimageParameters {
#[serde(rename = "tempDisk", default, skip_serializing_if = "Option::is_none")]
pub temp_disk: Option<bool>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineCaptureParameters {
#[serde(rename = "vhdPrefix")]
pub vhd_prefix: String,
#[serde(rename = "destinationContainerName")]
pub destination_container_name: String,
#[serde(rename = "overwriteVhds")]
pub overwrite_vhds: bool,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineCaptureResult {
#[serde(flatten)]
pub sub_resource: SubResource,
#[serde(rename = "$schema", default, skip_serializing_if = "Option::is_none")]
pub schema: Option<String>,
#[serde(rename = "contentVersion", default, skip_serializing_if = "Option::is_none")]
pub content_version: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub parameters: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub resources: Vec<serde_json::Value>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Plan {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub publisher: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub product: Option<String>,
#[serde(rename = "promotionCode", default, skip_serializing_if = "Option::is_none")]
pub promotion_code: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HardwareProfile {
#[serde(rename = "vmSize", default, skip_serializing_if = "Option::is_none")]
pub vm_size: Option<hardware_profile::VmSize>,
#[serde(rename = "vmSizeProperties", default, skip_serializing_if = "Option::is_none")]
pub vm_size_properties: Option<VmSizeProperties>,
}
pub mod hardware_profile {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum VmSize {
#[serde(rename = "Basic_A0")]
BasicA0,
#[serde(rename = "Basic_A1")]
BasicA1,
#[serde(rename = "Basic_A2")]
BasicA2,
#[serde(rename = "Basic_A3")]
BasicA3,
#[serde(rename = "Basic_A4")]
BasicA4,
#[serde(rename = "Standard_A0")]
StandardA0,
#[serde(rename = "Standard_A1")]
StandardA1,
#[serde(rename = "Standard_A2")]
StandardA2,
#[serde(rename = "Standard_A3")]
StandardA3,
#[serde(rename = "Standard_A4")]
StandardA4,
#[serde(rename = "Standard_A5")]
StandardA5,
#[serde(rename = "Standard_A6")]
StandardA6,
#[serde(rename = "Standard_A7")]
StandardA7,
#[serde(rename = "Standard_A8")]
StandardA8,
#[serde(rename = "Standard_A9")]
StandardA9,
#[serde(rename = "Standard_A10")]
StandardA10,
#[serde(rename = "Standard_A11")]
StandardA11,
#[serde(rename = "Standard_A1_v2")]
StandardA1V2,
#[serde(rename = "Standard_A2_v2")]
StandardA2V2,
#[serde(rename = "Standard_A4_v2")]
StandardA4V2,
#[serde(rename = "Standard_A8_v2")]
StandardA8V2,
#[serde(rename = "Standard_A2m_v2")]
StandardA2mV2,
#[serde(rename = "Standard_A4m_v2")]
StandardA4mV2,
#[serde(rename = "Standard_A8m_v2")]
StandardA8mV2,
#[serde(rename = "Standard_B1s")]
StandardB1s,
#[serde(rename = "Standard_B1ms")]
StandardB1ms,
#[serde(rename = "Standard_B2s")]
StandardB2s,
#[serde(rename = "Standard_B2ms")]
StandardB2ms,
#[serde(rename = "Standard_B4ms")]
StandardB4ms,
#[serde(rename = "Standard_B8ms")]
StandardB8ms,
#[serde(rename = "Standard_D1")]
StandardD1,
#[serde(rename = "Standard_D2")]
StandardD2,
#[serde(rename = "Standard_D3")]
StandardD3,
#[serde(rename = "Standard_D4")]
StandardD4,
#[serde(rename = "Standard_D11")]
StandardD11,
#[serde(rename = "Standard_D12")]
StandardD12,
#[serde(rename = "Standard_D13")]
StandardD13,
#[serde(rename = "Standard_D14")]
StandardD14,
#[serde(rename = "Standard_D1_v2")]
StandardD1V2,
#[serde(rename = "Standard_D2_v2")]
StandardD2V2,
#[serde(rename = "Standard_D3_v2")]
StandardD3V2,
#[serde(rename = "Standard_D4_v2")]
StandardD4V2,
#[serde(rename = "Standard_D5_v2")]
StandardD5V2,
#[serde(rename = "Standard_D2_v3")]
StandardD2V3,
#[serde(rename = "Standard_D4_v3")]
StandardD4V3,
#[serde(rename = "Standard_D8_v3")]
StandardD8V3,
#[serde(rename = "Standard_D16_v3")]
StandardD16V3,
#[serde(rename = "Standard_D32_v3")]
StandardD32V3,
#[serde(rename = "Standard_D64_v3")]
StandardD64V3,
#[serde(rename = "Standard_D2s_v3")]
StandardD2sV3,
#[serde(rename = "Standard_D4s_v3")]
StandardD4sV3,
#[serde(rename = "Standard_D8s_v3")]
StandardD8sV3,
#[serde(rename = "Standard_D16s_v3")]
StandardD16sV3,
#[serde(rename = "Standard_D32s_v3")]
StandardD32sV3,
#[serde(rename = "Standard_D64s_v3")]
StandardD64sV3,
#[serde(rename = "Standard_D11_v2")]
StandardD11V2,
#[serde(rename = "Standard_D12_v2")]
StandardD12V2,
#[serde(rename = "Standard_D13_v2")]
StandardD13V2,
#[serde(rename = "Standard_D14_v2")]
StandardD14V2,
#[serde(rename = "Standard_D15_v2")]
StandardD15V2,
#[serde(rename = "Standard_DS1")]
StandardDs1,
#[serde(rename = "Standard_DS2")]
StandardDs2,
#[serde(rename = "Standard_DS3")]
StandardDs3,
#[serde(rename = "Standard_DS4")]
StandardDs4,
#[serde(rename = "Standard_DS11")]
StandardDs11,
#[serde(rename = "Standard_DS12")]
StandardDs12,
#[serde(rename = "Standard_DS13")]
StandardDs13,
#[serde(rename = "Standard_DS14")]
StandardDs14,
#[serde(rename = "Standard_DS1_v2")]
StandardDs1V2,
#[serde(rename = "Standard_DS2_v2")]
StandardDs2V2,
#[serde(rename = "Standard_DS3_v2")]
StandardDs3V2,
#[serde(rename = "Standard_DS4_v2")]
StandardDs4V2,
#[serde(rename = "Standard_DS5_v2")]
StandardDs5V2,
#[serde(rename = "Standard_DS11_v2")]
StandardDs11V2,
#[serde(rename = "Standard_DS12_v2")]
StandardDs12V2,
#[serde(rename = "Standard_DS13_v2")]
StandardDs13V2,
#[serde(rename = "Standard_DS14_v2")]
StandardDs14V2,
#[serde(rename = "Standard_DS15_v2")]
StandardDs15V2,
#[serde(rename = "Standard_DS13-4_v2")]
StandardDs134V2,
#[serde(rename = "Standard_DS13-2_v2")]
StandardDs132V2,
#[serde(rename = "Standard_DS14-8_v2")]
StandardDs148V2,
#[serde(rename = "Standard_DS14-4_v2")]
StandardDs144V2,
#[serde(rename = "Standard_E2_v3")]
StandardE2V3,
#[serde(rename = "Standard_E4_v3")]
StandardE4V3,
#[serde(rename = "Standard_E8_v3")]
StandardE8V3,
#[serde(rename = "Standard_E16_v3")]
StandardE16V3,
#[serde(rename = "Standard_E32_v3")]
StandardE32V3,
#[serde(rename = "Standard_E64_v3")]
StandardE64V3,
#[serde(rename = "Standard_E2s_v3")]
StandardE2sV3,
#[serde(rename = "Standard_E4s_v3")]
StandardE4sV3,
#[serde(rename = "Standard_E8s_v3")]
StandardE8sV3,
#[serde(rename = "Standard_E16s_v3")]
StandardE16sV3,
#[serde(rename = "Standard_E32s_v3")]
StandardE32sV3,
#[serde(rename = "Standard_E64s_v3")]
StandardE64sV3,
#[serde(rename = "Standard_E32-16_v3")]
StandardE3216V3,
#[serde(rename = "Standard_E32-8s_v3")]
StandardE328sV3,
#[serde(rename = "Standard_E64-32s_v3")]
StandardE6432sV3,
#[serde(rename = "Standard_E64-16s_v3")]
StandardE6416sV3,
#[serde(rename = "Standard_F1")]
StandardF1,
#[serde(rename = "Standard_F2")]
StandardF2,
#[serde(rename = "Standard_F4")]
StandardF4,
#[serde(rename = "Standard_F8")]
StandardF8,
#[serde(rename = "Standard_F16")]
StandardF16,
#[serde(rename = "Standard_F1s")]
StandardF1s,
#[serde(rename = "Standard_F2s")]
StandardF2s,
#[serde(rename = "Standard_F4s")]
StandardF4s,
#[serde(rename = "Standard_F8s")]
StandardF8s,
#[serde(rename = "Standard_F16s")]
StandardF16s,
#[serde(rename = "Standard_F2s_v2")]
StandardF2sV2,
#[serde(rename = "Standard_F4s_v2")]
StandardF4sV2,
#[serde(rename = "Standard_F8s_v2")]
StandardF8sV2,
#[serde(rename = "Standard_F16s_v2")]
StandardF16sV2,
#[serde(rename = "Standard_F32s_v2")]
StandardF32sV2,
#[serde(rename = "Standard_F64s_v2")]
StandardF64sV2,
#[serde(rename = "Standard_F72s_v2")]
StandardF72sV2,
#[serde(rename = "Standard_G1")]
StandardG1,
#[serde(rename = "Standard_G2")]
StandardG2,
#[serde(rename = "Standard_G3")]
StandardG3,
#[serde(rename = "Standard_G4")]
StandardG4,
#[serde(rename = "Standard_G5")]
StandardG5,
#[serde(rename = "Standard_GS1")]
StandardGs1,
#[serde(rename = "Standard_GS2")]
StandardGs2,
#[serde(rename = "Standard_GS3")]
StandardGs3,
#[serde(rename = "Standard_GS4")]
StandardGs4,
#[serde(rename = "Standard_GS5")]
StandardGs5,
#[serde(rename = "Standard_GS4-8")]
StandardGs48,
#[serde(rename = "Standard_GS4-4")]
StandardGs44,
#[serde(rename = "Standard_GS5-16")]
StandardGs516,
#[serde(rename = "Standard_GS5-8")]
StandardGs58,
#[serde(rename = "Standard_H8")]
StandardH8,
#[serde(rename = "Standard_H16")]
StandardH16,
#[serde(rename = "Standard_H8m")]
StandardH8m,
#[serde(rename = "Standard_H16m")]
StandardH16m,
#[serde(rename = "Standard_H16r")]
StandardH16r,
#[serde(rename = "Standard_H16mr")]
StandardH16mr,
#[serde(rename = "Standard_L4s")]
StandardL4s,
#[serde(rename = "Standard_L8s")]
StandardL8s,
#[serde(rename = "Standard_L16s")]
StandardL16s,
#[serde(rename = "Standard_L32s")]
StandardL32s,
#[serde(rename = "Standard_M64s")]
StandardM64s,
#[serde(rename = "Standard_M64ms")]
StandardM64ms,
#[serde(rename = "Standard_M128s")]
StandardM128s,
#[serde(rename = "Standard_M128ms")]
StandardM128ms,
#[serde(rename = "Standard_M64-32ms")]
StandardM6432ms,
#[serde(rename = "Standard_M64-16ms")]
StandardM6416ms,
#[serde(rename = "Standard_M128-64ms")]
StandardM12864ms,
#[serde(rename = "Standard_M128-32ms")]
StandardM12832ms,
#[serde(rename = "Standard_NC6")]
StandardNc6,
#[serde(rename = "Standard_NC12")]
StandardNc12,
#[serde(rename = "Standard_NC24")]
StandardNc24,
#[serde(rename = "Standard_NC24r")]
StandardNc24r,
#[serde(rename = "Standard_NC6s_v2")]
StandardNc6sV2,
#[serde(rename = "Standard_NC12s_v2")]
StandardNc12sV2,
#[serde(rename = "Standard_NC24s_v2")]
StandardNc24sV2,
#[serde(rename = "Standard_NC24rs_v2")]
StandardNc24rsV2,
#[serde(rename = "Standard_NC6s_v3")]
StandardNc6sV3,
#[serde(rename = "Standard_NC12s_v3")]
StandardNc12sV3,
#[serde(rename = "Standard_NC24s_v3")]
StandardNc24sV3,
#[serde(rename = "Standard_NC24rs_v3")]
StandardNc24rsV3,
#[serde(rename = "Standard_ND6s")]
StandardNd6s,
#[serde(rename = "Standard_ND12s")]
StandardNd12s,
#[serde(rename = "Standard_ND24s")]
StandardNd24s,
#[serde(rename = "Standard_ND24rs")]
StandardNd24rs,
#[serde(rename = "Standard_NV6")]
StandardNv6,
#[serde(rename = "Standard_NV12")]
StandardNv12,
#[serde(rename = "Standard_NV24")]
StandardNv24,
}
}
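/// Specifies VM size property settings on the virtual machine.
/// The unusual snake_case of the fields below appears to be the code generator's
/// split of `vCPUsAvailable` / `vCPUsPerCore`; the serde renames keep the wire
/// format intact.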
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VmSizeProperties {
#[serde(rename = "vCPUsAvailable", default, skip_serializing_if = "Option::is_none")]
pub v_cp_us_available: Option<i32>,
#[serde(rename = "vCPUsPerCore", default, skip_serializing_if = "Option::is_none")]
pub v_cp_us_per_core: Option<i32>,
}
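/// Specifies information about the image to use. You can specify information about
/// platform images, marketplace images, or virtual machine images.
///
/// A minimal sketch of referencing a platform image (publisher/offer/sku/version
/// are illustrative values; `SubResource::default()` assumes that type provides a
/// `Default` impl, which this generated module may not guarantee):
/// ```ignore
/// let image = ImageReference {
///     sub_resource: SubResource::default(),
///     publisher: Some("Canonical".to_string()),
///     offer: Some("UbuntuServer".to_string()),
///     sku: Some("18.04-LTS".to_string()),
///     version: Some("latest".to_string()),
///     exact_version: None,
///     shared_gallery_image_id: None,
/// };
/// ```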
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ImageReference {
#[serde(flatten)]
pub sub_resource: SubResource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub publisher: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub offer: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub sku: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub version: Option<String>,
#[serde(rename = "exactVersion", default, skip_serializing_if = "Option::is_none")]
pub exact_version: Option<String>,
#[serde(rename = "sharedGalleryImageId", default, skip_serializing_if = "Option::is_none")]
pub shared_gallery_image_id: Option<String>,
}
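/// Describes a reference to a Key Vault secret.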
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct KeyVaultSecretReference {
#[serde(rename = "secretUrl")]
pub secret_url: String,
#[serde(rename = "sourceVault")]
pub source_vault: SubResource,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DiskEncryptionSetParameters {
#[serde(flatten)]
pub sub_resource: SubResource,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct KeyVaultKeyReference {
#[serde(rename = "keyUrl")]
pub key_url: String,
#[serde(rename = "sourceVault")]
pub source_vault: SubResource,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DiskEncryptionSettings {
#[serde(rename = "diskEncryptionKey", default, skip_serializing_if = "Option::is_none")]
pub disk_encryption_key: Option<KeyVaultSecretReference>,
#[serde(rename = "keyEncryptionKey", default, skip_serializing_if = "Option::is_none")]
pub key_encryption_key: Option<KeyVaultKeyReference>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub enabled: Option<bool>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualHardDisk {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub uri: Option<String>,
}
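/// Specifies the caching requirements for a disk. The service default is `None`
/// for standard storage and `ReadOnly` for premium storage.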
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Caching {
None,
ReadOnly,
ReadWrite,
}
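/// Specifies how a disk should be created: `FromImage` to create it from a
/// platform or user image, `Empty` to create a blank data disk, or `Attach` to
/// attach an existing disk.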
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum CreateOption {
FromImage,
Empty,
Attach,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum DetachOption {
ForceDetach,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum DeleteOption {
Delete,
Detach,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum StorageAccountType {
#[serde(rename = "Standard_LRS")]
StandardLrs,
#[serde(rename = "Premium_LRS")]
PremiumLrs,
#[serde(rename = "StandardSSD_LRS")]
StandardSsdLrs,
#[serde(rename = "UltraSSD_LRS")]
UltraSsdLrs,
#[serde(rename = "Premium_ZRS")]
PremiumZrs,
#[serde(rename = "StandardSSD_ZRS")]
StandardSsdZrs,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum DiffDiskOption {
Local,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum DiffDiskPlacement {
CacheDisk,
ResourceDisk,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DiffDiskSettings {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub option: Option<DiffDiskOption>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub placement: Option<DiffDiskPlacement>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagedDiskParameters {
#[serde(flatten)]
pub sub_resource: SubResource,
#[serde(rename = "storageAccountType", default, skip_serializing_if = "Option::is_none")]
pub storage_account_type: Option<StorageAccountType>,
#[serde(rename = "diskEncryptionSet", default, skip_serializing_if = "Option::is_none")]
pub disk_encryption_set: Option<DiskEncryptionSetParameters>,
}
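/// Specifies information about the operating system disk used by the virtual
/// machine. `create_option` is the only required field.
///
/// A minimal sketch of a premium managed OS disk created from an image (all values
/// are illustrative; `SubResource::default()` assumes a `Default` impl):
/// ```ignore
/// let os_disk = OsDisk {
///     os_type: None,
///     encryption_settings: None,
///     name: None,
///     vhd: None,
///     image: None,
///     caching: Some(Caching::ReadWrite),
///     write_accelerator_enabled: None,
///     diff_disk_settings: None,
///     create_option: CreateOption::FromImage,
///     disk_size_gb: Some(128),
///     managed_disk: Some(ManagedDiskParameters {
///         sub_resource: SubResource::default(),
///         storage_account_type: Some(StorageAccountType::PremiumLrs),
///         disk_encryption_set: None,
///     }),
///     delete_option: None,
/// };
/// ```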
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OsDisk {
#[serde(rename = "osType", default, skip_serializing_if = "Option::is_none")]
pub os_type: Option<os_disk::OsType>,
#[serde(rename = "encryptionSettings", default, skip_serializing_if = "Option::is_none")]
pub encryption_settings: Option<DiskEncryptionSettings>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub vhd: Option<VirtualHardDisk>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub image: Option<VirtualHardDisk>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub caching: Option<Caching>,
#[serde(rename = "writeAcceleratorEnabled", default, skip_serializing_if = "Option::is_none")]
pub write_accelerator_enabled: Option<bool>,
#[serde(rename = "diffDiskSettings", default, skip_serializing_if = "Option::is_none")]
pub diff_disk_settings: Option<DiffDiskSettings>,
#[serde(rename = "createOption")]
pub create_option: CreateOption,
#[serde(rename = "diskSizeGB", default, skip_serializing_if = "Option::is_none")]
pub disk_size_gb: Option<i32>,
#[serde(rename = "managedDisk", default, skip_serializing_if = "Option::is_none")]
pub managed_disk: Option<ManagedDiskParameters>,
#[serde(rename = "deleteOption", default, skip_serializing_if = "Option::is_none")]
pub delete_option: Option<DeleteOption>,
}
pub mod os_disk {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum OsType {
Windows,
Linux,
}
}
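/// Describes a data disk. `lun` (logical unit number) must be unique across the
/// data disks attached to one virtual machine. Note that `diskMBpsReadWrite` is
/// bandwidth in megabytes per second, despite the generated field name.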
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataDisk {
pub lun: i32,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub vhd: Option<VirtualHardDisk>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub image: Option<VirtualHardDisk>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub caching: Option<Caching>,
#[serde(rename = "writeAcceleratorEnabled", default, skip_serializing_if = "Option::is_none")]
pub write_accelerator_enabled: Option<bool>,
#[serde(rename = "createOption")]
pub create_option: CreateOption,
#[serde(rename = "diskSizeGB", default, skip_serializing_if = "Option::is_none")]
pub disk_size_gb: Option<i32>,
#[serde(rename = "managedDisk", default, skip_serializing_if = "Option::is_none")]
pub managed_disk: Option<ManagedDiskParameters>,
#[serde(rename = "toBeDetached", default, skip_serializing_if = "Option::is_none")]
pub to_be_detached: Option<bool>,
#[serde(rename = "diskIOPSReadWrite", default, skip_serializing_if = "Option::is_none")]
pub disk_iops_read_write: Option<i64>,
#[serde(rename = "diskMBpsReadWrite", default, skip_serializing_if = "Option::is_none")]
pub disk_m_bps_read_write: Option<i64>,
#[serde(rename = "detachOption", default, skip_serializing_if = "Option::is_none")]
pub detach_option: Option<DetachOption>,
#[serde(rename = "deleteOption", default, skip_serializing_if = "Option::is_none")]
pub delete_option: Option<DeleteOption>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CapacityReservationProfile {
#[serde(rename = "capacityReservationGroup", default, skip_serializing_if = "Option::is_none")]
pub capacity_reservation_group: Option<SubResource>,
}
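/// Specifies the storage settings for the virtual machine disks.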
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StorageProfile {
#[serde(rename = "imageReference", default, skip_serializing_if = "Option::is_none")]
pub image_reference: Option<ImageReference>,
#[serde(rename = "osDisk", default, skip_serializing_if = "Option::is_none")]
pub os_disk: Option<OsDisk>,
#[serde(rename = "dataDisks", default, skip_serializing_if = "Vec::is_empty")]
pub data_disks: Vec<DataDisk>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UefiSettings {
#[serde(rename = "secureBootEnabled", default, skip_serializing_if = "Option::is_none")]
pub secure_boot_enabled: Option<bool>,
#[serde(rename = "vTpmEnabled", default, skip_serializing_if = "Option::is_none")]
pub v_tpm_enabled: Option<bool>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SecurityProfile {
#[serde(rename = "uefiSettings", default, skip_serializing_if = "Option::is_none")]
pub uefi_settings: Option<UefiSettings>,
#[serde(rename = "encryptionAtHost", default, skip_serializing_if = "Option::is_none")]
pub encryption_at_host: Option<bool>,
#[serde(rename = "securityType", default, skip_serializing_if = "Option::is_none")]
pub security_type: Option<security_profile::SecurityType>,
}
pub mod security_profile {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum SecurityType {
TrustedLaunch,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VmGalleryApplication {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tags: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub order: Option<i32>,
#[serde(rename = "packageReferenceId")]
pub package_reference_id: String,
#[serde(rename = "configurationReference", default, skip_serializing_if = "Option::is_none")]
pub configuration_reference: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApplicationProfile {
#[serde(rename = "galleryApplications", default, skip_serializing_if = "Vec::is_empty")]
pub gallery_applications: Vec<VmGalleryApplication>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AdditionalCapabilities {
#[serde(rename = "ultraSSDEnabled", default, skip_serializing_if = "Option::is_none")]
pub ultra_ssd_enabled: Option<bool>,
#[serde(rename = "hibernationEnabled", default, skip_serializing_if = "Option::is_none")]
pub hibernation_enabled: Option<bool>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AdditionalUnattendContent {
#[serde(rename = "passName", default, skip_serializing_if = "Option::is_none")]
pub pass_name: Option<additional_unattend_content::PassName>,
#[serde(rename = "componentName", default, skip_serializing_if = "Option::is_none")]
pub component_name: Option<additional_unattend_content::ComponentName>,
#[serde(rename = "settingName", default, skip_serializing_if = "Option::is_none")]
pub setting_name: Option<additional_unattend_content::SettingName>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub content: Option<String>,
}
pub mod additional_unattend_content {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum PassName {
OobeSystem,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ComponentName {
#[serde(rename = "Microsoft-Windows-Shell-Setup")]
MicrosoftWindowsShellSetup,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum SettingName {
AutoLogon,
FirstLogonCommands,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WinRmListener {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub protocol: Option<win_rm_listener::Protocol>,
#[serde(rename = "certificateUrl", default, skip_serializing_if = "Option::is_none")]
pub certificate_url: Option<String>,
}
pub mod win_rm_listener {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Protocol {
Http,
Https,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WinRmConfiguration {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub listeners: Vec<WinRmListener>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WindowsConfiguration {
#[serde(rename = "provisionVMAgent", default, skip_serializing_if = "Option::is_none")]
pub provision_vm_agent: Option<bool>,
#[serde(rename = "enableAutomaticUpdates", default, skip_serializing_if = "Option::is_none")]
pub enable_automatic_updates: Option<bool>,
#[serde(rename = "timeZone", default, skip_serializing_if = "Option::is_none")]
pub time_zone: Option<String>,
#[serde(rename = "additionalUnattendContent", default, skip_serializing_if = "Vec::is_empty")]
pub additional_unattend_content: Vec<AdditionalUnattendContent>,
#[serde(rename = "patchSettings", default, skip_serializing_if = "Option::is_none")]
pub patch_settings: Option<PatchSettings>,
#[serde(rename = "winRM", default, skip_serializing_if = "Option::is_none")]
pub win_rm: Option<WinRmConfiguration>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SshPublicKey {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub path: Option<String>,
#[serde(rename = "keyData", default, skip_serializing_if = "Option::is_none")]
pub key_data: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SshConfiguration {
#[serde(rename = "publicKeys", default, skip_serializing_if = "Vec::is_empty")]
pub public_keys: Vec<SshPublicKey>,
}
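/// Specifies the Linux operating system settings on the virtual machine.
///
/// A minimal sketch of key-only SSH authentication (the key path and key data are
/// placeholder values):
/// ```ignore
/// let linux = LinuxConfiguration {
///     disable_password_authentication: Some(true),
///     ssh: Some(SshConfiguration {
///         public_keys: vec![SshPublicKey {
///             path: Some("/home/azureuser/.ssh/authorized_keys".to_string()),
///             key_data: Some("ssh-rsa AAAA... user@host".to_string()),
///         }],
///     }),
///     provision_vm_agent: Some(true),
///     patch_settings: None,
/// };
/// ```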
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LinuxConfiguration {
#[serde(rename = "disablePasswordAuthentication", default, skip_serializing_if = "Option::is_none")]
pub disable_password_authentication: Option<bool>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub ssh: Option<SshConfiguration>,
#[serde(rename = "provisionVMAgent", default, skip_serializing_if = "Option::is_none")]
pub provision_vm_agent: Option<bool>,
#[serde(rename = "patchSettings", default, skip_serializing_if = "Option::is_none")]
pub patch_settings: Option<LinuxPatchSettings>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VaultCertificate {
#[serde(rename = "certificateUrl", default, skip_serializing_if = "Option::is_none")]
pub certificate_url: Option<String>,
#[serde(rename = "certificateStore", default, skip_serializing_if = "Option::is_none")]
pub certificate_store: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VaultSecretGroup {
#[serde(rename = "sourceVault", default, skip_serializing_if = "Option::is_none")]
pub source_vault: Option<SubResource>,
#[serde(rename = "vaultCertificates", default, skip_serializing_if = "Vec::is_empty")]
pub vault_certificates: Vec<VaultCertificate>,
}
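/// Specifies the operating system settings used while creating the virtual
/// machine. Some of these settings cannot be changed once the VM is provisioned.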
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OsProfile {
#[serde(rename = "computerName", default, skip_serializing_if = "Option::is_none")]
pub computer_name: Option<String>,
#[serde(rename = "adminUsername", default, skip_serializing_if = "Option::is_none")]
pub admin_username: Option<String>,
#[serde(rename = "adminPassword", default, skip_serializing_if = "Option::is_none")]
pub admin_password: Option<String>,
#[serde(rename = "customData", default, skip_serializing_if = "Option::is_none")]
pub custom_data: Option<String>,
#[serde(rename = "windowsConfiguration", default, skip_serializing_if = "Option::is_none")]
pub windows_configuration: Option<WindowsConfiguration>,
#[serde(rename = "linuxConfiguration", default, skip_serializing_if = "Option::is_none")]
pub linux_configuration: Option<LinuxConfiguration>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub secrets: Vec<VaultSecretGroup>,
#[serde(rename = "allowExtensionOperations", default, skip_serializing_if = "Option::is_none")]
pub allow_extension_operations: Option<bool>,
#[serde(rename = "requireGuestProvisionSignal", default, skip_serializing_if = "Option::is_none")]
pub require_guest_provision_signal: Option<bool>,
}
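/// Specifies the configuration parameters for automatic repairs on the virtual
/// machine scale set. `grace_period` is an ISO 8601 duration string (for example
/// `"PT30M"`).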
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AutomaticRepairsPolicy {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub enabled: Option<bool>,
#[serde(rename = "gracePeriod", default, skip_serializing_if = "Option::is_none")]
pub grace_period: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NetworkInterfaceReferenceProperties {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub primary: Option<bool>,
#[serde(rename = "deleteOption", default, skip_serializing_if = "Option::is_none")]
pub delete_option: Option<network_interface_reference_properties::DeleteOption>,
}
pub mod network_interface_reference_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum DeleteOption {
Delete,
Detach,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NetworkInterfaceReference {
#[serde(flatten)]
pub sub_resource: SubResource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<NetworkInterfaceReferenceProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineIpTag {
#[serde(rename = "ipTagType", default, skip_serializing_if = "Option::is_none")]
pub ip_tag_type: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tag: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachinePublicIpAddressDnsSettingsConfiguration {
#[serde(rename = "domainNameLabel")]
pub domain_name_label: String,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachinePublicIpAddressConfigurationProperties {
#[serde(rename = "idleTimeoutInMinutes", default, skip_serializing_if = "Option::is_none")]
pub idle_timeout_in_minutes: Option<i32>,
#[serde(rename = "deleteOption", default, skip_serializing_if = "Option::is_none")]
pub delete_option: Option<virtual_machine_public_ip_address_configuration_properties::DeleteOption>,
#[serde(rename = "dnsSettings", default, skip_serializing_if = "Option::is_none")]
pub dns_settings: Option<VirtualMachinePublicIpAddressDnsSettingsConfiguration>,
#[serde(rename = "ipTags", default, skip_serializing_if = "Vec::is_empty")]
pub ip_tags: Vec<VirtualMachineIpTag>,
#[serde(rename = "publicIPPrefix", default, skip_serializing_if = "Option::is_none")]
pub public_ip_prefix: Option<SubResource>,
#[serde(rename = "publicIPAddressVersion", default, skip_serializing_if = "Option::is_none")]
pub public_ip_address_version: Option<virtual_machine_public_ip_address_configuration_properties::PublicIpAddressVersion>,
#[serde(rename = "publicIPAllocationMethod", default, skip_serializing_if = "Option::is_none")]
pub public_ip_allocation_method: Option<virtual_machine_public_ip_address_configuration_properties::PublicIpAllocationMethod>,
}
pub mod virtual_machine_public_ip_address_configuration_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum DeleteOption {
Delete,
Detach,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum PublicIpAddressVersion {
IPv4,
IPv6,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum PublicIpAllocationMethod {
Dynamic,
Static,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PublicIpAddressSku {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<public_ip_address_sku::Name>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tier: Option<public_ip_address_sku::Tier>,
}
pub mod public_ip_address_sku {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Name {
Basic,
Standard,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Tier {
Regional,
Global,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachinePublicIpAddressConfiguration {
pub name: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<VirtualMachinePublicIpAddressConfigurationProperties>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub sku: Option<PublicIpAddressSku>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineNetworkInterfaceIpConfigurationProperties {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub subnet: Option<SubResource>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub primary: Option<bool>,
#[serde(rename = "publicIPAddressConfiguration", default, skip_serializing_if = "Option::is_none")]
pub public_ip_address_configuration: Option<VirtualMachinePublicIpAddressConfiguration>,
#[serde(rename = "privateIPAddressVersion", default, skip_serializing_if = "Option::is_none")]
pub private_ip_address_version: Option<virtual_machine_network_interface_ip_configuration_properties::PrivateIpAddressVersion>,
#[serde(rename = "applicationSecurityGroups", default, skip_serializing_if = "Vec::is_empty")]
pub application_security_groups: Vec<SubResource>,
#[serde(rename = "applicationGatewayBackendAddressPools", default, skip_serializing_if = "Vec::is_empty")]
pub application_gateway_backend_address_pools: Vec<SubResource>,
#[serde(rename = "loadBalancerBackendAddressPools", default, skip_serializing_if = "Vec::is_empty")]
pub load_balancer_backend_address_pools: Vec<SubResource>,
}
pub mod virtual_machine_network_interface_ip_configuration_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum PrivateIpAddressVersion {
IPv4,
IPv6,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineNetworkInterfaceIpConfiguration {
pub name: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<VirtualMachineNetworkInterfaceIpConfigurationProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineNetworkInterfaceDnsSettingsConfiguration {
#[serde(rename = "dnsServers", default, skip_serializing_if = "Vec::is_empty")]
pub dns_servers: Vec<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineNetworkInterfaceConfigurationProperties {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub primary: Option<bool>,
#[serde(rename = "deleteOption", default, skip_serializing_if = "Option::is_none")]
pub delete_option: Option<virtual_machine_network_interface_configuration_properties::DeleteOption>,
#[serde(rename = "enableAcceleratedNetworking", default, skip_serializing_if = "Option::is_none")]
pub enable_accelerated_networking: Option<bool>,
#[serde(rename = "enableFpga", default, skip_serializing_if = "Option::is_none")]
pub enable_fpga: Option<bool>,
#[serde(rename = "enableIPForwarding", default, skip_serializing_if = "Option::is_none")]
pub enable_ip_forwarding: Option<bool>,
#[serde(rename = "networkSecurityGroup", default, skip_serializing_if = "Option::is_none")]
pub network_security_group: Option<SubResource>,
#[serde(rename = "dnsSettings", default, skip_serializing_if = "Option::is_none")]
pub dns_settings: Option<VirtualMachineNetworkInterfaceDnsSettingsConfiguration>,
#[serde(rename = "ipConfigurations")]
pub ip_configurations: Vec<VirtualMachineNetworkInterfaceIpConfiguration>,
#[serde(rename = "dscpConfiguration", default, skip_serializing_if = "Option::is_none")]
pub dscp_configuration: Option<SubResource>,
}
pub mod virtual_machine_network_interface_configuration_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum DeleteOption {
Delete,
Detach,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineNetworkInterfaceConfiguration {
pub name: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<VirtualMachineNetworkInterfaceConfigurationProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NetworkProfile {
#[serde(rename = "networkInterfaces", default, skip_serializing_if = "Vec::is_empty")]
pub network_interfaces: Vec<NetworkInterfaceReference>,
#[serde(rename = "networkApiVersion", default, skip_serializing_if = "Option::is_none")]
pub network_api_version: Option<network_profile::NetworkApiVersion>,
#[serde(rename = "networkInterfaceConfigurations", default, skip_serializing_if = "Vec::is_empty")]
pub network_interface_configurations: Vec<VirtualMachineNetworkInterfaceConfiguration>,
}
pub mod network_profile {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum NetworkApiVersion {
#[serde(rename = "2020-11-01")]
N2020_11_01,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BootDiagnostics {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub enabled: Option<bool>,
#[serde(rename = "storageUri", default, skip_serializing_if = "Option::is_none")]
pub storage_uri: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DiagnosticsProfile {
#[serde(rename = "bootDiagnostics", default, skip_serializing_if = "Option::is_none")]
pub boot_diagnostics: Option<BootDiagnostics>,
}
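/// Specifies the priority for a virtual machine or scale set. `Low` is the older
/// name for Azure Spot; new deployments should prefer `Spot`.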
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Priority {
Regular,
Low,
Spot,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum EvictionPolicy {
Deallocate,
Delete,
}
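/// Specifies the billing related details of an Azure Spot VM or VMSS. A
/// `max_price` of `-1.0` indicates the default price, i.e. the VM is not evicted
/// for price reasons.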
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BillingProfile {
#[serde(rename = "maxPrice", default, skip_serializing_if = "Option::is_none")]
pub max_price: Option<f64>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineExtensionHandlerInstanceView {
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(rename = "typeHandlerVersion", default, skip_serializing_if = "Option::is_none")]
pub type_handler_version: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub status: Option<InstanceViewStatus>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineAgentInstanceView {
#[serde(rename = "vmAgentVersion", default, skip_serializing_if = "Option::is_none")]
pub vm_agent_version: Option<String>,
#[serde(rename = "extensionHandlers", default, skip_serializing_if = "Vec::is_empty")]
pub extension_handlers: Vec<VirtualMachineExtensionHandlerInstanceView>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub statuses: Vec<InstanceViewStatus>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DiskInstanceView {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "encryptionSettings", default, skip_serializing_if = "Vec::is_empty")]
pub encryption_settings: Vec<DiskEncryptionSettings>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub statuses: Vec<InstanceViewStatus>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BootDiagnosticsInstanceView {
#[serde(rename = "consoleScreenshotBlobUri", default, skip_serializing_if = "Option::is_none")]
pub console_screenshot_blob_uri: Option<String>,
#[serde(rename = "serialConsoleLogBlobUri", default, skip_serializing_if = "Option::is_none")]
pub serial_console_log_blob_uri: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub status: Option<InstanceViewStatus>,
}
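/// Identity for the virtual machine.
///
/// Note that the combined variant serializes as the literal string
/// `"SystemAssigned, UserAssigned"`, matching the service wire format:
/// ```ignore
/// let identity: VirtualMachineIdentity =
///     serde_json::from_str(r#"{"type": "SystemAssigned, UserAssigned"}"#).unwrap();
/// assert_eq!(
///     identity.type_,
///     Some(virtual_machine_identity::Type::SystemAssignedUserAssigned)
/// );
/// ```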
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineIdentity {
#[serde(rename = "principalId", default, skip_serializing_if = "Option::is_none")]
pub principal_id: Option<String>,
#[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")]
pub tenant_id: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<virtual_machine_identity::Type>,
#[serde(rename = "userAssignedIdentities", default, skip_serializing_if = "Option::is_none")]
pub user_assigned_identities: Option<serde_json::Value>,
}
pub mod virtual_machine_identity {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Type {
SystemAssigned,
UserAssigned,
#[serde(rename = "SystemAssigned, UserAssigned")]
SystemAssignedUserAssigned,
None,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct MaintenanceRedeployStatus {
#[serde(rename = "isCustomerInitiatedMaintenanceAllowed", default, skip_serializing_if = "Option::is_none")]
pub is_customer_initiated_maintenance_allowed: Option<bool>,
#[serde(rename = "preMaintenanceWindowStartTime", default, skip_serializing_if = "Option::is_none")]
pub pre_maintenance_window_start_time: Option<String>,
#[serde(rename = "preMaintenanceWindowEndTime", default, skip_serializing_if = "Option::is_none")]
pub pre_maintenance_window_end_time: Option<String>,
#[serde(rename = "maintenanceWindowStartTime", default, skip_serializing_if = "Option::is_none")]
pub maintenance_window_start_time: Option<String>,
#[serde(rename = "maintenanceWindowEndTime", default, skip_serializing_if = "Option::is_none")]
pub maintenance_window_end_time: Option<String>,
#[serde(rename = "lastOperationResultCode", default, skip_serializing_if = "Option::is_none")]
pub last_operation_result_code: Option<maintenance_redeploy_status::LastOperationResultCode>,
#[serde(rename = "lastOperationMessage", default, skip_serializing_if = "Option::is_none")]
pub last_operation_message: Option<String>,
}
pub mod maintenance_redeploy_status {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum LastOperationResultCode {
None,
RetryLater,
MaintenanceAborted,
MaintenanceCompleted,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineInstanceView {
#[serde(rename = "platformUpdateDomain", default, skip_serializing_if = "Option::is_none")]
pub platform_update_domain: Option<i32>,
#[serde(rename = "platformFaultDomain", default, skip_serializing_if = "Option::is_none")]
pub platform_fault_domain: Option<i32>,
#[serde(rename = "computerName", default, skip_serializing_if = "Option::is_none")]
pub computer_name: Option<String>,
#[serde(rename = "osName", default, skip_serializing_if = "Option::is_none")]
pub os_name: Option<String>,
#[serde(rename = "osVersion", default, skip_serializing_if = "Option::is_none")]
pub os_version: Option<String>,
#[serde(rename = "hyperVGeneration", default, skip_serializing_if = "Option::is_none")]
pub hyper_v_generation: Option<virtual_machine_instance_view::HyperVGeneration>,
#[serde(rename = "rdpThumbPrint", default, skip_serializing_if = "Option::is_none")]
pub rdp_thumb_print: Option<String>,
#[serde(rename = "vmAgent", default, skip_serializing_if = "Option::is_none")]
pub vm_agent: Option<VirtualMachineAgentInstanceView>,
#[serde(rename = "maintenanceRedeployStatus", default, skip_serializing_if = "Option::is_none")]
pub maintenance_redeploy_status: Option<MaintenanceRedeployStatus>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub disks: Vec<DiskInstanceView>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub extensions: Vec<VirtualMachineExtensionInstanceView>,
#[serde(rename = "vmHealth", default, skip_serializing_if = "Option::is_none")]
pub vm_health: Option<VirtualMachineHealthStatus>,
#[serde(rename = "bootDiagnostics", default, skip_serializing_if = "Option::is_none")]
pub boot_diagnostics: Option<BootDiagnosticsInstanceView>,
#[serde(rename = "assignedHost", default, skip_serializing_if = "Option::is_none")]
pub assigned_host: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub statuses: Vec<InstanceViewStatus>,
#[serde(rename = "patchStatus", default, skip_serializing_if = "Option::is_none")]
pub patch_status: Option<VirtualMachinePatchStatus>,
}
pub mod virtual_machine_instance_view {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum HyperVGeneration {
V1,
V2,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineProperties {
#[serde(rename = "hardwareProfile", default, skip_serializing_if = "Option::is_none")]
pub hardware_profile: Option<HardwareProfile>,
#[serde(rename = "storageProfile", default, skip_serializing_if = "Option::is_none")]
pub storage_profile: Option<StorageProfile>,
#[serde(rename = "additionalCapabilities", default, skip_serializing_if = "Option::is_none")]
pub additional_capabilities: Option<AdditionalCapabilities>,
#[serde(rename = "osProfile", default, skip_serializing_if = "Option::is_none")]
pub os_profile: Option<OsProfile>,
#[serde(rename = "networkProfile", default, skip_serializing_if = "Option::is_none")]
pub network_profile: Option<NetworkProfile>,
#[serde(rename = "securityProfile", default, skip_serializing_if = "Option::is_none")]
pub security_profile: Option<SecurityProfile>,
#[serde(rename = "diagnosticsProfile", default, skip_serializing_if = "Option::is_none")]
pub diagnostics_profile: Option<DiagnosticsProfile>,
#[serde(rename = "availabilitySet", default, skip_serializing_if = "Option::is_none")]
pub availability_set: Option<SubResource>,
#[serde(rename = "virtualMachineScaleSet", default, skip_serializing_if = "Option::is_none")]
pub virtual_machine_scale_set: Option<SubResource>,
#[serde(rename = "proximityPlacementGroup", default, skip_serializing_if = "Option::is_none")]
pub proximity_placement_group: Option<SubResource>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub priority: Option<Priority>,
#[serde(rename = "evictionPolicy", default, skip_serializing_if = "Option::is_none")]
pub eviction_policy: Option<EvictionPolicy>,
#[serde(rename = "billingProfile", default, skip_serializing_if = "Option::is_none")]
pub billing_profile: Option<BillingProfile>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub host: Option<SubResource>,
#[serde(rename = "hostGroup", default, skip_serializing_if = "Option::is_none")]
pub host_group: Option<SubResource>,
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<String>,
#[serde(rename = "instanceView", default, skip_serializing_if = "Option::is_none")]
pub instance_view: Option<VirtualMachineInstanceView>,
#[serde(rename = "licenseType", default, skip_serializing_if = "Option::is_none")]
pub license_type: Option<String>,
#[serde(rename = "vmId", default, skip_serializing_if = "Option::is_none")]
pub vm_id: Option<String>,
#[serde(rename = "extensionsTimeBudget", default, skip_serializing_if = "Option::is_none")]
pub extensions_time_budget: Option<String>,
#[serde(rename = "platformFaultDomain", default, skip_serializing_if = "Option::is_none")]
pub platform_fault_domain: Option<i32>,
#[serde(rename = "scheduledEventsProfile", default, skip_serializing_if = "Option::is_none")]
pub scheduled_events_profile: Option<ScheduledEventsProfile>,
#[serde(rename = "userData", default, skip_serializing_if = "Option::is_none")]
pub user_data: Option<String>,
#[serde(rename = "capacityReservation", default, skip_serializing_if = "Option::is_none")]
pub capacity_reservation: Option<CapacityReservationProfile>,
#[serde(rename = "applicationProfile", default, skip_serializing_if = "Option::is_none")]
pub application_profile: Option<ApplicationProfile>,
}
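/// Describes a Virtual Machine.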
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachine {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub plan: Option<Plan>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<VirtualMachineProperties>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub resources: Vec<VirtualMachineExtension>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub identity: Option<VirtualMachineIdentity>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub zones: Vec<String>,
#[serde(rename = "extendedLocation", default, skip_serializing_if = "Option::is_none")]
pub extended_location: Option<ExtendedLocation>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineUpdate {
#[serde(flatten)]
pub update_resource: UpdateResource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub plan: Option<Plan>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<VirtualMachineProperties>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub identity: Option<VirtualMachineIdentity>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub zones: Vec<String>,
}
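/// The List Virtual Machine operation response. A populated `next_link` means more
/// pages are available.
///
/// A sketch of draining all pages (`fetch` is a stand-in for whatever HTTP client
/// this module is paired with):
/// ```ignore
/// let mut url = first_page_url;
/// loop {
///     let page: VirtualMachineListResult = fetch(&url)?;
///     for vm in &page.value {
///         println!("{:?}", vm.properties);
///     }
///     match page.next_link {
///         Some(next) => url = next,
///         None => break,
///     }
/// }
/// ```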
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineListResult {
pub value: Vec<VirtualMachine>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Sku {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tier: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub capacity: Option<i64>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AutomaticOsUpgradePolicy {
#[serde(rename = "enableAutomaticOSUpgrade", default, skip_serializing_if = "Option::is_none")]
pub enable_automatic_os_upgrade: Option<bool>,
#[serde(rename = "disableAutomaticRollback", default, skip_serializing_if = "Option::is_none")]
pub disable_automatic_rollback: Option<bool>,
}
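/// Describes an upgrade policy - automatic, manual, or rolling.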
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UpgradePolicy {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub mode: Option<upgrade_policy::Mode>,
#[serde(rename = "rollingUpgradePolicy", default, skip_serializing_if = "Option::is_none")]
pub rolling_upgrade_policy: Option<RollingUpgradePolicy>,
#[serde(rename = "automaticOSUpgradePolicy", default, skip_serializing_if = "Option::is_none")]
pub automatic_os_upgrade_policy: Option<AutomaticOsUpgradePolicy>,
}
pub mod upgrade_policy {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Mode {
Automatic,
Manual,
Rolling,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RollingUpgradePolicy {
#[serde(rename = "maxBatchInstancePercent", default, skip_serializing_if = "Option::is_none")]
pub max_batch_instance_percent: Option<i32>,
#[serde(rename = "maxUnhealthyInstancePercent", default, skip_serializing_if = "Option::is_none")]
pub max_unhealthy_instance_percent: Option<i32>,
#[serde(rename = "maxUnhealthyUpgradedInstancePercent", default, skip_serializing_if = "Option::is_none")]
pub max_unhealthy_upgraded_instance_percent: Option<i32>,
#[serde(rename = "pauseTimeBetweenBatches", default, skip_serializing_if = "Option::is_none")]
pub pause_time_between_batches: Option<String>,
#[serde(rename = "enableCrossZoneUpgrade", default, skip_serializing_if = "Option::is_none")]
pub enable_cross_zone_upgrade: Option<bool>,
#[serde(rename = "prioritizeUnhealthyInstances", default, skip_serializing_if = "Option::is_none")]
pub prioritize_unhealthy_instances: Option<bool>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ScaleInPolicy {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub rules: Vec<String>,
#[serde(rename = "forceDeletion", default, skip_serializing_if = "Option::is_none")]
pub force_deletion: Option<bool>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SpotRestorePolicy {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub enabled: Option<bool>,
#[serde(rename = "restoreTimeout", default, skip_serializing_if = "Option::is_none")]
pub restore_timeout: Option<String>,
}
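/// Specifies the orchestration mode for the virtual machine scale set: `Uniform`
/// scale sets manage identical instances from a single model, while `Flexible`
/// scale sets allow per-instance variation.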
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum OrchestrationMode {
Uniform,
Flexible,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ImageOsDisk {
#[serde(flatten)]
pub image_disk: ImageDisk,
#[serde(rename = "osType")]
pub os_type: image_os_disk::OsType,
#[serde(rename = "osState")]
pub os_state: image_os_disk::OsState,
}
pub mod image_os_disk {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum OsType {
Windows,
Linux,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum OsState {
Generalized,
Specialized,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ImageDataDisk {
#[serde(flatten)]
pub image_disk: ImageDisk,
pub lun: i32,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ImageDisk {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub snapshot: Option<SubResource>,
#[serde(rename = "managedDisk", default, skip_serializing_if = "Option::is_none")]
pub managed_disk: Option<SubResource>,
#[serde(rename = "blobUri", default, skip_serializing_if = "Option::is_none")]
pub blob_uri: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub caching: Option<image_disk::Caching>,
#[serde(rename = "diskSizeGB", default, skip_serializing_if = "Option::is_none")]
pub disk_size_gb: Option<i32>,
#[serde(rename = "storageAccountType", default, skip_serializing_if = "Option::is_none")]
pub storage_account_type: Option<StorageAccountType>,
#[serde(rename = "diskEncryptionSet", default, skip_serializing_if = "Option::is_none")]
pub disk_encryption_set: Option<DiskEncryptionSetParameters>,
}
pub mod image_disk {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Caching {
None,
ReadOnly,
ReadWrite,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ImageStorageProfile {
#[serde(rename = "osDisk", default, skip_serializing_if = "Option::is_none")]
pub os_disk: Option<ImageOsDisk>,
#[serde(rename = "dataDisks", default, skip_serializing_if = "Vec::is_empty")]
pub data_disks: Vec<ImageDataDisk>,
#[serde(rename = "zoneResilient", default, skip_serializing_if = "Option::is_none")]
pub zone_resilient: Option<bool>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ImageProperties {
#[serde(rename = "sourceVirtualMachine", default, skip_serializing_if = "Option::is_none")]
pub source_virtual_machine: Option<SubResource>,
#[serde(rename = "storageProfile", default, skip_serializing_if = "Option::is_none")]
pub storage_profile: Option<ImageStorageProfile>,
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<String>,
#[serde(rename = "hyperVGeneration", default, skip_serializing_if = "Option::is_none")]
pub hyper_v_generation: Option<HyperVGenerationType>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Image {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<ImageProperties>,
#[serde(rename = "extendedLocation", default, skip_serializing_if = "Option::is_none")]
pub extended_location: Option<ExtendedLocation>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ImageUpdate {
#[serde(flatten)]
pub update_resource: UpdateResource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<ImageProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ImageListResult {
pub value: Vec<Image>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetIdentity {
#[serde(rename = "principalId", default, skip_serializing_if = "Option::is_none")]
pub principal_id: Option<String>,
#[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")]
pub tenant_id: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<virtual_machine_scale_set_identity::Type>,
#[serde(rename = "userAssignedIdentities", default, skip_serializing_if = "Option::is_none")]
pub user_assigned_identities: Option<serde_json::Value>,
}
pub mod virtual_machine_scale_set_identity {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Type {
SystemAssigned,
UserAssigned,
#[serde(rename = "SystemAssigned, UserAssigned")]
SystemAssignedUserAssigned,
None,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetOsProfile {
#[serde(rename = "computerNamePrefix", default, skip_serializing_if = "Option::is_none")]
pub computer_name_prefix: Option<String>,
#[serde(rename = "adminUsername", default, skip_serializing_if = "Option::is_none")]
pub admin_username: Option<String>,
#[serde(rename = "adminPassword", default, skip_serializing_if = "Option::is_none")]
pub admin_password: Option<String>,
#[serde(rename = "customData", default, skip_serializing_if = "Option::is_none")]
pub custom_data: Option<String>,
#[serde(rename = "windowsConfiguration", default, skip_serializing_if = "Option::is_none")]
pub windows_configuration: Option<WindowsConfiguration>,
#[serde(rename = "linuxConfiguration", default, skip_serializing_if = "Option::is_none")]
pub linux_configuration: Option<LinuxConfiguration>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub secrets: Vec<VaultSecretGroup>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetUpdateOsProfile {
#[serde(rename = "customData", default, skip_serializing_if = "Option::is_none")]
pub custom_data: Option<String>,
#[serde(rename = "windowsConfiguration", default, skip_serializing_if = "Option::is_none")]
pub windows_configuration: Option<WindowsConfiguration>,
#[serde(rename = "linuxConfiguration", default, skip_serializing_if = "Option::is_none")]
pub linux_configuration: Option<LinuxConfiguration>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub secrets: Vec<VaultSecretGroup>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetManagedDiskParameters {
#[serde(rename = "storageAccountType", default, skip_serializing_if = "Option::is_none")]
pub storage_account_type: Option<StorageAccountType>,
#[serde(rename = "diskEncryptionSet", default, skip_serializing_if = "Option::is_none")]
pub disk_encryption_set: Option<DiskEncryptionSetParameters>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetOsDisk {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub caching: Option<Caching>,
#[serde(rename = "writeAcceleratorEnabled", default, skip_serializing_if = "Option::is_none")]
pub write_accelerator_enabled: Option<bool>,
#[serde(rename = "createOption")]
pub create_option: CreateOption,
#[serde(rename = "diffDiskSettings", default, skip_serializing_if = "Option::is_none")]
pub diff_disk_settings: Option<DiffDiskSettings>,
#[serde(rename = "diskSizeGB", default, skip_serializing_if = "Option::is_none")]
pub disk_size_gb: Option<i32>,
#[serde(rename = "osType", default, skip_serializing_if = "Option::is_none")]
pub os_type: Option<virtual_machine_scale_set_os_disk::OsType>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub image: Option<VirtualHardDisk>,
#[serde(rename = "vhdContainers", default, skip_serializing_if = "Vec::is_empty")]
pub vhd_containers: Vec<String>,
#[serde(rename = "managedDisk", default, skip_serializing_if = "Option::is_none")]
pub managed_disk: Option<VirtualMachineScaleSetManagedDiskParameters>,
}
pub mod virtual_machine_scale_set_os_disk {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum OsType {
Windows,
Linux,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetUpdateOsDisk {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub caching: Option<Caching>,
#[serde(rename = "writeAcceleratorEnabled", default, skip_serializing_if = "Option::is_none")]
pub write_accelerator_enabled: Option<bool>,
#[serde(rename = "diskSizeGB", default, skip_serializing_if = "Option::is_none")]
pub disk_size_gb: Option<i32>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub image: Option<VirtualHardDisk>,
#[serde(rename = "vhdContainers", default, skip_serializing_if = "Vec::is_empty")]
pub vhd_containers: Vec<String>,
#[serde(rename = "managedDisk", default, skip_serializing_if = "Option::is_none")]
pub managed_disk: Option<VirtualMachineScaleSetManagedDiskParameters>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetDataDisk {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
pub lun: i32,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub caching: Option<Caching>,
#[serde(rename = "writeAcceleratorEnabled", default, skip_serializing_if = "Option::is_none")]
pub write_accelerator_enabled: Option<bool>,
#[serde(rename = "createOption")]
pub create_option: CreateOption,
#[serde(rename = "diskSizeGB", default, skip_serializing_if = "Option::is_none")]
pub disk_size_gb: Option<i32>,
#[serde(rename = "managedDisk", default, skip_serializing_if = "Option::is_none")]
pub managed_disk: Option<VirtualMachineScaleSetManagedDiskParameters>,
#[serde(rename = "diskIOPSReadWrite", default, skip_serializing_if = "Option::is_none")]
pub disk_iops_read_write: Option<i64>,
#[serde(rename = "diskMBpsReadWrite", default, skip_serializing_if = "Option::is_none")]
pub disk_m_bps_read_write: Option<i64>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetStorageProfile {
#[serde(rename = "imageReference", default, skip_serializing_if = "Option::is_none")]
pub image_reference: Option<ImageReference>,
#[serde(rename = "osDisk", default, skip_serializing_if = "Option::is_none")]
pub os_disk: Option<VirtualMachineScaleSetOsDisk>,
#[serde(rename = "dataDisks", default, skip_serializing_if = "Vec::is_empty")]
pub data_disks: Vec<VirtualMachineScaleSetDataDisk>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetUpdateStorageProfile {
#[serde(rename = "imageReference", default, skip_serializing_if = "Option::is_none")]
pub image_reference: Option<ImageReference>,
#[serde(rename = "osDisk", default, skip_serializing_if = "Option::is_none")]
pub os_disk: Option<VirtualMachineScaleSetUpdateOsDisk>,
#[serde(rename = "dataDisks", default, skip_serializing_if = "Vec::is_empty")]
pub data_disks: Vec<VirtualMachineScaleSetDataDisk>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiEntityReference {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetIpConfigurationProperties {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub subnet: Option<ApiEntityReference>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub primary: Option<bool>,
#[serde(rename = "publicIPAddressConfiguration", default, skip_serializing_if = "Option::is_none")]
pub public_ip_address_configuration: Option<VirtualMachineScaleSetPublicIpAddressConfiguration>,
#[serde(rename = "privateIPAddressVersion", default, skip_serializing_if = "Option::is_none")]
pub private_ip_address_version: Option<virtual_machine_scale_set_ip_configuration_properties::PrivateIpAddressVersion>,
#[serde(rename = "applicationGatewayBackendAddressPools", default, skip_serializing_if = "Vec::is_empty")]
pub application_gateway_backend_address_pools: Vec<SubResource>,
#[serde(rename = "applicationSecurityGroups", default, skip_serializing_if = "Vec::is_empty")]
pub application_security_groups: Vec<SubResource>,
#[serde(rename = "loadBalancerBackendAddressPools", default, skip_serializing_if = "Vec::is_empty")]
pub load_balancer_backend_address_pools: Vec<SubResource>,
#[serde(rename = "loadBalancerInboundNatPools", default, skip_serializing_if = "Vec::is_empty")]
pub load_balancer_inbound_nat_pools: Vec<SubResource>,
}
pub mod virtual_machine_scale_set_ip_configuration_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum PrivateIpAddressVersion {
IPv4,
IPv6,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetUpdateIpConfigurationProperties {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub subnet: Option<ApiEntityReference>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub primary: Option<bool>,
#[serde(rename = "publicIPAddressConfiguration", default, skip_serializing_if = "Option::is_none")]
pub public_ip_address_configuration: Option<VirtualMachineScaleSetUpdatePublicIpAddressConfiguration>,
#[serde(rename = "privateIPAddressVersion", default, skip_serializing_if = "Option::is_none")]
pub private_ip_address_version: Option<virtual_machine_scale_set_update_ip_configuration_properties::PrivateIpAddressVersion>,
#[serde(rename = "applicationGatewayBackendAddressPools", default, skip_serializing_if = "Vec::is_empty")]
pub application_gateway_backend_address_pools: Vec<SubResource>,
#[serde(rename = "applicationSecurityGroups", default, skip_serializing_if = "Vec::is_empty")]
pub application_security_groups: Vec<SubResource>,
#[serde(rename = "loadBalancerBackendAddressPools", default, skip_serializing_if = "Vec::is_empty")]
pub load_balancer_backend_address_pools: Vec<SubResource>,
#[serde(rename = "loadBalancerInboundNatPools", default, skip_serializing_if = "Vec::is_empty")]
pub load_balancer_inbound_nat_pools: Vec<SubResource>,
}
pub mod virtual_machine_scale_set_update_ip_configuration_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum PrivateIpAddressVersion {
IPv4,
IPv6,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetIpConfiguration {
#[serde(flatten)]
pub sub_resource: SubResource,
pub name: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<VirtualMachineScaleSetIpConfigurationProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetUpdateIpConfiguration {
#[serde(flatten)]
pub sub_resource: SubResource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<VirtualMachineScaleSetUpdateIpConfigurationProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetNetworkConfigurationProperties {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub primary: Option<bool>,
#[serde(rename = "enableAcceleratedNetworking", default, skip_serializing_if = "Option::is_none")]
pub enable_accelerated_networking: Option<bool>,
#[serde(rename = "enableFpga", default, skip_serializing_if = "Option::is_none")]
pub enable_fpga: Option<bool>,
#[serde(rename = "networkSecurityGroup", default, skip_serializing_if = "Option::is_none")]
pub network_security_group: Option<SubResource>,
#[serde(rename = "dnsSettings", default, skip_serializing_if = "Option::is_none")]
pub dns_settings: Option<VirtualMachineScaleSetNetworkConfigurationDnsSettings>,
#[serde(rename = "ipConfigurations")]
pub ip_configurations: Vec<VirtualMachineScaleSetIpConfiguration>,
#[serde(rename = "enableIPForwarding", default, skip_serializing_if = "Option::is_none")]
pub enable_ip_forwarding: Option<bool>,
#[serde(rename = "deleteOption", default, skip_serializing_if = "Option::is_none")]
pub delete_option: Option<virtual_machine_scale_set_network_configuration_properties::DeleteOption>,
}
pub mod virtual_machine_scale_set_network_configuration_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum DeleteOption {
Delete,
Detach,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetUpdateNetworkConfigurationProperties {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub primary: Option<bool>,
#[serde(rename = "enableAcceleratedNetworking", default, skip_serializing_if = "Option::is_none")]
pub enable_accelerated_networking: Option<bool>,
#[serde(rename = "enableFpga", default, skip_serializing_if = "Option::is_none")]
pub enable_fpga: Option<bool>,
#[serde(rename = "networkSecurityGroup", default, skip_serializing_if = "Option::is_none")]
pub network_security_group: Option<SubResource>,
#[serde(rename = "dnsSettings", default, skip_serializing_if = "Option::is_none")]
pub dns_settings: Option<VirtualMachineScaleSetNetworkConfigurationDnsSettings>,
#[serde(rename = "ipConfigurations", default, skip_serializing_if = "Vec::is_empty")]
pub ip_configurations: Vec<VirtualMachineScaleSetUpdateIpConfiguration>,
#[serde(rename = "enableIPForwarding", default, skip_serializing_if = "Option::is_none")]
pub enable_ip_forwarding: Option<bool>,
#[serde(rename = "deleteOption", default, skip_serializing_if = "Option::is_none")]
pub delete_option: Option<virtual_machine_scale_set_update_network_configuration_properties::DeleteOption>,
}
pub mod virtual_machine_scale_set_update_network_configuration_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum DeleteOption {
Delete,
Detach,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetNetworkConfiguration {
#[serde(flatten)]
pub sub_resource: SubResource,
pub name: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<VirtualMachineScaleSetNetworkConfigurationProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetUpdateNetworkConfiguration {
#[serde(flatten)]
pub sub_resource: SubResource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<VirtualMachineScaleSetUpdateNetworkConfigurationProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetNetworkConfigurationDnsSettings {
#[serde(rename = "dnsServers", default, skip_serializing_if = "Vec::is_empty")]
pub dns_servers: Vec<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetPublicIpAddressConfigurationDnsSettings {
#[serde(rename = "domainNameLabel")]
pub domain_name_label: String,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetIpTag {
#[serde(rename = "ipTagType", default, skip_serializing_if = "Option::is_none")]
pub ip_tag_type: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tag: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetPublicIpAddressConfiguration {
pub name: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<VirtualMachineScaleSetPublicIpAddressConfigurationProperties>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub sku: Option<PublicIpAddressSku>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetUpdatePublicIpAddressConfiguration {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<VirtualMachineScaleSetUpdatePublicIpAddressConfigurationProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetPublicIpAddressConfigurationProperties {
#[serde(rename = "idleTimeoutInMinutes", default, skip_serializing_if = "Option::is_none")]
pub idle_timeout_in_minutes: Option<i32>,
#[serde(rename = "dnsSettings", default, skip_serializing_if = "Option::is_none")]
pub dns_settings: Option<VirtualMachineScaleSetPublicIpAddressConfigurationDnsSettings>,
#[serde(rename = "ipTags", default, skip_serializing_if = "Vec::is_empty")]
pub ip_tags: Vec<VirtualMachineScaleSetIpTag>,
#[serde(rename = "publicIPPrefix", default, skip_serializing_if = "Option::is_none")]
pub public_ip_prefix: Option<SubResource>,
#[serde(rename = "publicIPAddressVersion", default, skip_serializing_if = "Option::is_none")]
pub public_ip_address_version: Option<virtual_machine_scale_set_public_ip_address_configuration_properties::PublicIpAddressVersion>,
#[serde(rename = "deleteOption", default, skip_serializing_if = "Option::is_none")]
pub delete_option: Option<virtual_machine_scale_set_public_ip_address_configuration_properties::DeleteOption>,
}
pub mod virtual_machine_scale_set_public_ip_address_configuration_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum PublicIpAddressVersion {
IPv4,
IPv6,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum DeleteOption {
Delete,
Detach,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetUpdatePublicIpAddressConfigurationProperties {
#[serde(rename = "idleTimeoutInMinutes", default, skip_serializing_if = "Option::is_none")]
pub idle_timeout_in_minutes: Option<i32>,
#[serde(rename = "dnsSettings", default, skip_serializing_if = "Option::is_none")]
pub dns_settings: Option<VirtualMachineScaleSetPublicIpAddressConfigurationDnsSettings>,
#[serde(rename = "deleteOption", default, skip_serializing_if = "Option::is_none")]
pub delete_option: Option<virtual_machine_scale_set_update_public_ip_address_configuration_properties::DeleteOption>,
}
pub mod virtual_machine_scale_set_update_public_ip_address_configuration_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum DeleteOption {
Delete,
Detach,
}
}
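/// Describes a virtual machine scale set network profile.
///
/// Illustrative sketch: the `NetworkApiVersion` variant defined below
/// round-trips to the literal API-version string via its `serde(rename)`
/// attribute.
///
/// ```ignore
/// use virtual_machine_scale_set_network_profile::NetworkApiVersion;
/// let s = serde_json::to_string(&NetworkApiVersion::N2020_11_01).unwrap();
/// assert_eq!(s, "\"2020-11-01\"");
/// ```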
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetNetworkProfile {
#[serde(rename = "healthProbe", default, skip_serializing_if = "Option::is_none")]
pub health_probe: Option<ApiEntityReference>,
#[serde(rename = "networkInterfaceConfigurations", default, skip_serializing_if = "Vec::is_empty")]
pub network_interface_configurations: Vec<VirtualMachineScaleSetNetworkConfiguration>,
#[serde(rename = "networkApiVersion", default, skip_serializing_if = "Option::is_none")]
pub network_api_version: Option<virtual_machine_scale_set_network_profile::NetworkApiVersion>,
}
pub mod virtual_machine_scale_set_network_profile {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum NetworkApiVersion {
#[serde(rename = "2020-11-01")]
N2020_11_01,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetUpdateNetworkProfile {
#[serde(rename = "healthProbe", default, skip_serializing_if = "Option::is_none")]
pub health_probe: Option<ApiEntityReference>,
#[serde(rename = "networkInterfaceConfigurations", default, skip_serializing_if = "Vec::is_empty")]
pub network_interface_configurations: Vec<VirtualMachineScaleSetUpdateNetworkConfiguration>,
#[serde(rename = "networkApiVersion", default, skip_serializing_if = "Option::is_none")]
pub network_api_version: Option<virtual_machine_scale_set_update_network_profile::NetworkApiVersion>,
}
pub mod virtual_machine_scale_set_update_network_profile {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum NetworkApiVersion {
#[serde(rename = "2020-11-01")]
N2020_11_01,
}
}
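/// Describes the properties of a virtual machine scale set extension.
/// `settings` and `protectedSettings` are free-form JSON, so they are modeled
/// as `serde_json::Value`.
///
/// Illustrative sketch (the publisher, type, and settings values are
/// placeholders, not a real extension): deserializing a typical payload.
///
/// ```ignore
/// let props: VirtualMachineScaleSetExtensionProperties =
///     serde_json::from_value(serde_json::json!({
///         "publisher": "Contoso.Example",
///         "type": "CustomScript",
///         "typeHandlerVersion": "2.1",
///         "settings": { "commandToExecute": "echo hello" }
///     }))
///     .unwrap();
/// assert_eq!(props.type_handler_version.as_deref(), Some("2.1"));
/// ```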
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetExtensionProperties {
#[serde(rename = "forceUpdateTag", default, skip_serializing_if = "Option::is_none")]
pub force_update_tag: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub publisher: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(rename = "typeHandlerVersion", default, skip_serializing_if = "Option::is_none")]
pub type_handler_version: Option<String>,
#[serde(rename = "autoUpgradeMinorVersion", default, skip_serializing_if = "Option::is_none")]
pub auto_upgrade_minor_version: Option<bool>,
#[serde(rename = "enableAutomaticUpgrade", default, skip_serializing_if = "Option::is_none")]
pub enable_automatic_upgrade: Option<bool>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub settings: Option<serde_json::Value>,
#[serde(rename = "protectedSettings", default, skip_serializing_if = "Option::is_none")]
pub protected_settings: Option<serde_json::Value>,
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<String>,
#[serde(rename = "provisionAfterExtensions", default, skip_serializing_if = "Vec::is_empty")]
pub provision_after_extensions: Vec<String>,
#[serde(rename = "suppressFailures", default, skip_serializing_if = "Option::is_none")]
pub suppress_failures: Option<bool>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetExtension {
#[serde(flatten)]
pub sub_resource_read_only: SubResourceReadOnly,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<VirtualMachineScaleSetExtensionProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetExtensionUpdate {
#[serde(flatten)]
pub sub_resource_read_only: SubResourceReadOnly,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<VirtualMachineScaleSetExtensionProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetExtensionListResult {
pub value: Vec<VirtualMachineScaleSetExtension>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetExtensionProfile {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub extensions: Vec<VirtualMachineScaleSetExtension>,
#[serde(rename = "extensionsTimeBudget", default, skip_serializing_if = "Option::is_none")]
pub extensions_time_budget: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetVmProfile {
#[serde(rename = "osProfile", default, skip_serializing_if = "Option::is_none")]
pub os_profile: Option<VirtualMachineScaleSetOsProfile>,
#[serde(rename = "storageProfile", default, skip_serializing_if = "Option::is_none")]
pub storage_profile: Option<VirtualMachineScaleSetStorageProfile>,
#[serde(rename = "networkProfile", default, skip_serializing_if = "Option::is_none")]
pub network_profile: Option<VirtualMachineScaleSetNetworkProfile>,
#[serde(rename = "securityProfile", default, skip_serializing_if = "Option::is_none")]
pub security_profile: Option<SecurityProfile>,
#[serde(rename = "diagnosticsProfile", default, skip_serializing_if = "Option::is_none")]
pub diagnostics_profile: Option<DiagnosticsProfile>,
#[serde(rename = "extensionProfile", default, skip_serializing_if = "Option::is_none")]
pub extension_profile: Option<VirtualMachineScaleSetExtensionProfile>,
#[serde(rename = "licenseType", default, skip_serializing_if = "Option::is_none")]
pub license_type: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub priority: Option<Priority>,
#[serde(rename = "evictionPolicy", default, skip_serializing_if = "Option::is_none")]
pub eviction_policy: Option<EvictionPolicy>,
#[serde(rename = "billingProfile", default, skip_serializing_if = "Option::is_none")]
pub billing_profile: Option<BillingProfile>,
#[serde(rename = "scheduledEventsProfile", default, skip_serializing_if = "Option::is_none")]
pub scheduled_events_profile: Option<ScheduledEventsProfile>,
#[serde(rename = "userData", default, skip_serializing_if = "Option::is_none")]
pub user_data: Option<String>,
#[serde(rename = "capacityReservation", default, skip_serializing_if = "Option::is_none")]
pub capacity_reservation: Option<CapacityReservationProfile>,
#[serde(rename = "applicationProfile", default, skip_serializing_if = "Option::is_none")]
pub application_profile: Option<ApplicationProfile>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetUpdateVmProfile {
#[serde(rename = "osProfile", default, skip_serializing_if = "Option::is_none")]
pub os_profile: Option<VirtualMachineScaleSetUpdateOsProfile>,
#[serde(rename = "storageProfile", default, skip_serializing_if = "Option::is_none")]
pub storage_profile: Option<VirtualMachineScaleSetUpdateStorageProfile>,
#[serde(rename = "networkProfile", default, skip_serializing_if = "Option::is_none")]
pub network_profile: Option<VirtualMachineScaleSetUpdateNetworkProfile>,
#[serde(rename = "securityProfile", default, skip_serializing_if = "Option::is_none")]
pub security_profile: Option<SecurityProfile>,
#[serde(rename = "diagnosticsProfile", default, skip_serializing_if = "Option::is_none")]
pub diagnostics_profile: Option<DiagnosticsProfile>,
#[serde(rename = "extensionProfile", default, skip_serializing_if = "Option::is_none")]
pub extension_profile: Option<VirtualMachineScaleSetExtensionProfile>,
#[serde(rename = "licenseType", default, skip_serializing_if = "Option::is_none")]
pub license_type: Option<String>,
#[serde(rename = "billingProfile", default, skip_serializing_if = "Option::is_none")]
pub billing_profile: Option<BillingProfile>,
#[serde(rename = "scheduledEventsProfile", default, skip_serializing_if = "Option::is_none")]
pub scheduled_events_profile: Option<ScheduledEventsProfile>,
#[serde(rename = "userData", default, skip_serializing_if = "Option::is_none")]
pub user_data: Option<String>,
}
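/// Describes the properties of a virtual machine scale set. Every field is
/// defaulted, so a response body with no properties set still deserializes
/// cleanly.
///
/// Illustrative sketch:
///
/// ```ignore
/// let props: VirtualMachineScaleSetProperties = serde_json::from_str("{}").unwrap();
/// assert!(props.upgrade_policy.is_none());
/// ```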
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetProperties {
#[serde(rename = "upgradePolicy", default, skip_serializing_if = "Option::is_none")]
pub upgrade_policy: Option<UpgradePolicy>,
#[serde(rename = "automaticRepairsPolicy", default, skip_serializing_if = "Option::is_none")]
pub automatic_repairs_policy: Option<AutomaticRepairsPolicy>,
#[serde(rename = "virtualMachineProfile", default, skip_serializing_if = "Option::is_none")]
pub virtual_machine_profile: Option<VirtualMachineScaleSetVmProfile>,
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub overprovision: Option<bool>,
#[serde(
rename = "doNotRunExtensionsOnOverprovisionedVMs",
default,
skip_serializing_if = "Option::is_none"
)]
pub do_not_run_extensions_on_overprovisioned_v_ms: Option<bool>,
#[serde(rename = "uniqueId", default, skip_serializing_if = "Option::is_none")]
pub unique_id: Option<String>,
#[serde(rename = "singlePlacementGroup", default, skip_serializing_if = "Option::is_none")]
pub single_placement_group: Option<bool>,
#[serde(rename = "zoneBalance", default, skip_serializing_if = "Option::is_none")]
pub zone_balance: Option<bool>,
#[serde(rename = "platformFaultDomainCount", default, skip_serializing_if = "Option::is_none")]
pub platform_fault_domain_count: Option<i32>,
#[serde(rename = "proximityPlacementGroup", default, skip_serializing_if = "Option::is_none")]
pub proximity_placement_group: Option<SubResource>,
#[serde(rename = "hostGroup", default, skip_serializing_if = "Option::is_none")]
pub host_group: Option<SubResource>,
#[serde(rename = "additionalCapabilities", default, skip_serializing_if = "Option::is_none")]
pub additional_capabilities: Option<AdditionalCapabilities>,
#[serde(rename = "scaleInPolicy", default, skip_serializing_if = "Option::is_none")]
pub scale_in_policy: Option<ScaleInPolicy>,
#[serde(rename = "orchestrationMode", default, skip_serializing_if = "Option::is_none")]
pub orchestration_mode: Option<OrchestrationMode>,
#[serde(rename = "spotRestorePolicy", default, skip_serializing_if = "Option::is_none")]
pub spot_restore_policy: Option<SpotRestorePolicy>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetUpdateProperties {
#[serde(rename = "upgradePolicy", default, skip_serializing_if = "Option::is_none")]
pub upgrade_policy: Option<UpgradePolicy>,
#[serde(rename = "automaticRepairsPolicy", default, skip_serializing_if = "Option::is_none")]
pub automatic_repairs_policy: Option<AutomaticRepairsPolicy>,
#[serde(rename = "virtualMachineProfile", default, skip_serializing_if = "Option::is_none")]
pub virtual_machine_profile: Option<VirtualMachineScaleSetUpdateVmProfile>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub overprovision: Option<bool>,
#[serde(
rename = "doNotRunExtensionsOnOverprovisionedVMs",
default,
skip_serializing_if = "Option::is_none"
)]
pub do_not_run_extensions_on_overprovisioned_v_ms: Option<bool>,
#[serde(rename = "singlePlacementGroup", default, skip_serializing_if = "Option::is_none")]
pub single_placement_group: Option<bool>,
#[serde(rename = "additionalCapabilities", default, skip_serializing_if = "Option::is_none")]
pub additional_capabilities: Option<AdditionalCapabilities>,
#[serde(rename = "scaleInPolicy", default, skip_serializing_if = "Option::is_none")]
pub scale_in_policy: Option<ScaleInPolicy>,
#[serde(rename = "proximityPlacementGroup", default, skip_serializing_if = "Option::is_none")]
pub proximity_placement_group: Option<SubResource>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSet {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub sku: Option<Sku>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub plan: Option<Plan>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<VirtualMachineScaleSetProperties>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub identity: Option<VirtualMachineScaleSetIdentity>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub zones: Vec<String>,
#[serde(rename = "extendedLocation", default, skip_serializing_if = "Option::is_none")]
pub extended_location: Option<ExtendedLocation>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetVmReimageParameters {
#[serde(flatten)]
pub virtual_machine_reimage_parameters: VirtualMachineReimageParameters,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetReimageParameters {
#[serde(flatten)]
pub virtual_machine_scale_set_vm_reimage_parameters: VirtualMachineScaleSetVmReimageParameters,
#[serde(rename = "instanceIds", default, skip_serializing_if = "Vec::is_empty")]
pub instance_ids: Vec<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetUpdate {
#[serde(flatten)]
pub update_resource: UpdateResource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub sku: Option<Sku>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub plan: Option<Plan>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<VirtualMachineScaleSetUpdateProperties>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub identity: Option<VirtualMachineScaleSetIdentity>,
}
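/// A list of virtual machine scale set instance IDs; omitting `instanceIds`
/// results in the operation being applied to all instances in the scale set.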
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetVmInstanceIDs {
#[serde(rename = "instanceIds", default, skip_serializing_if = "Vec::is_empty")]
pub instance_ids: Vec<String>,
}
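/// A list of virtual machine scale set instance IDs that must be supplied
/// explicitly: `instanceIds` is required here, so an empty JSON object fails
/// to deserialize.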
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetVmInstanceRequiredIDs {
#[serde(rename = "instanceIds")]
pub instance_ids: Vec<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineStatusCodeCount {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub code: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub count: Option<i32>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetInstanceViewStatusesSummary {
#[serde(rename = "statusesSummary", default, skip_serializing_if = "Vec::is_empty")]
pub statuses_summary: Vec<VirtualMachineStatusCodeCount>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetVmExtensionsSummary {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "statusesSummary", default, skip_serializing_if = "Vec::is_empty")]
pub statuses_summary: Vec<VirtualMachineStatusCodeCount>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OrchestrationServiceSummary {
#[serde(rename = "serviceName", default, skip_serializing_if = "Option::is_none")]
pub service_name: Option<orchestration_service_summary::ServiceName>,
#[serde(rename = "serviceState", default, skip_serializing_if = "Option::is_none")]
pub service_state: Option<orchestration_service_summary::ServiceState>,
}
pub mod orchestration_service_summary {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ServiceName {
AutomaticRepairs,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ServiceState {
NotRunning,
Running,
Suspended,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetInstanceView {
#[serde(rename = "virtualMachine", default, skip_serializing_if = "Option::is_none")]
pub virtual_machine: Option<VirtualMachineScaleSetInstanceViewStatusesSummary>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub extensions: Vec<VirtualMachineScaleSetVmExtensionsSummary>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub statuses: Vec<InstanceViewStatus>,
#[serde(rename = "orchestrationServices", default, skip_serializing_if = "Vec::is_empty")]
pub orchestration_services: Vec<OrchestrationServiceSummary>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetListResult {
pub value: Vec<VirtualMachineScaleSet>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetListWithLinkResult {
pub value: Vec<VirtualMachineScaleSet>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetSkuCapacity {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub minimum: Option<i64>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub maximum: Option<i64>,
#[serde(rename = "defaultCapacity", default, skip_serializing_if = "Option::is_none")]
pub default_capacity: Option<i64>,
#[serde(rename = "scaleType", default, skip_serializing_if = "Option::is_none")]
pub scale_type: Option<virtual_machine_scale_set_sku_capacity::ScaleType>,
}
pub mod virtual_machine_scale_set_sku_capacity {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ScaleType {
Automatic,
None,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetSku {
#[serde(rename = "resourceType", default, skip_serializing_if = "Option::is_none")]
pub resource_type: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub sku: Option<Sku>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub capacity: Option<VirtualMachineScaleSetSkuCapacity>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetListSkusResult {
pub value: Vec<VirtualMachineScaleSetSku>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RollbackStatusInfo {
#[serde(rename = "successfullyRolledbackInstanceCount", default, skip_serializing_if = "Option::is_none")]
pub successfully_rolledback_instance_count: Option<i32>,
#[serde(rename = "failedRolledbackInstanceCount", default, skip_serializing_if = "Option::is_none")]
pub failed_rolledback_instance_count: Option<i32>,
#[serde(rename = "rollbackError", default, skip_serializing_if = "Option::is_none")]
pub rollback_error: Option<ApiError>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UpgradeOperationHistoryStatus {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub code: Option<upgrade_operation_history_status::Code>,
#[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")]
pub start_time: Option<String>,
#[serde(rename = "endTime", default, skip_serializing_if = "Option::is_none")]
pub end_time: Option<String>,
}
pub mod upgrade_operation_history_status {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Code {
RollingForward,
Cancelled,
Completed,
Faulted,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UpgradeOperationHistoricalStatusInfoProperties {
#[serde(rename = "runningStatus", default, skip_serializing_if = "Option::is_none")]
pub running_status: Option<UpgradeOperationHistoryStatus>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub progress: Option<RollingUpgradeProgressInfo>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub error: Option<ApiError>,
#[serde(rename = "startedBy", default, skip_serializing_if = "Option::is_none")]
pub started_by: Option<upgrade_operation_historical_status_info_properties::StartedBy>,
#[serde(rename = "targetImageReference", default, skip_serializing_if = "Option::is_none")]
pub target_image_reference: Option<ImageReference>,
#[serde(rename = "rollbackInfo", default, skip_serializing_if = "Option::is_none")]
pub rollback_info: Option<RollbackStatusInfo>,
}
pub mod upgrade_operation_historical_status_info_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum StartedBy {
Unknown,
User,
Platform,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UpgradeOperationHistoricalStatusInfo {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<UpgradeOperationHistoricalStatusInfoProperties>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub location: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetListOsUpgradeHistory {
pub value: Vec<UpgradeOperationHistoricalStatusInfo>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetVmProperties {
#[serde(rename = "latestModelApplied", default, skip_serializing_if = "Option::is_none")]
pub latest_model_applied: Option<bool>,
#[serde(rename = "vmId", default, skip_serializing_if = "Option::is_none")]
pub vm_id: Option<String>,
#[serde(rename = "instanceView", default, skip_serializing_if = "Option::is_none")]
pub instance_view: Option<VirtualMachineScaleSetVmInstanceView>,
#[serde(rename = "hardwareProfile", default, skip_serializing_if = "Option::is_none")]
pub hardware_profile: Option<HardwareProfile>,
#[serde(rename = "storageProfile", default, skip_serializing_if = "Option::is_none")]
pub storage_profile: Option<StorageProfile>,
#[serde(rename = "additionalCapabilities", default, skip_serializing_if = "Option::is_none")]
pub additional_capabilities: Option<AdditionalCapabilities>,
#[serde(rename = "osProfile", default, skip_serializing_if = "Option::is_none")]
pub os_profile: Option<OsProfile>,
#[serde(rename = "securityProfile", default, skip_serializing_if = "Option::is_none")]
pub security_profile: Option<SecurityProfile>,
#[serde(rename = "networkProfile", default, skip_serializing_if = "Option::is_none")]
pub network_profile: Option<NetworkProfile>,
#[serde(rename = "networkProfileConfiguration", default, skip_serializing_if = "Option::is_none")]
pub network_profile_configuration: Option<VirtualMachineScaleSetVmNetworkProfileConfiguration>,
#[serde(rename = "diagnosticsProfile", default, skip_serializing_if = "Option::is_none")]
pub diagnostics_profile: Option<DiagnosticsProfile>,
#[serde(rename = "availabilitySet", default, skip_serializing_if = "Option::is_none")]
pub availability_set: Option<SubResource>,
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<String>,
#[serde(rename = "licenseType", default, skip_serializing_if = "Option::is_none")]
pub license_type: Option<String>,
#[serde(rename = "modelDefinitionApplied", default, skip_serializing_if = "Option::is_none")]
pub model_definition_applied: Option<String>,
#[serde(rename = "protectionPolicy", default, skip_serializing_if = "Option::is_none")]
pub protection_policy: Option<VirtualMachineScaleSetVmProtectionPolicy>,
#[serde(rename = "userData", default, skip_serializing_if = "Option::is_none")]
pub user_data: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetVm {
#[serde(flatten)]
pub resource: Resource,
#[serde(rename = "instanceId", default, skip_serializing_if = "Option::is_none")]
pub instance_id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub sku: Option<Sku>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<VirtualMachineScaleSetVmProperties>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub plan: Option<Plan>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub resources: Vec<VirtualMachineExtension>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub zones: Vec<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetVmInstanceView {
#[serde(rename = "platformUpdateDomain", default, skip_serializing_if = "Option::is_none")]
pub platform_update_domain: Option<i32>,
#[serde(rename = "platformFaultDomain", default, skip_serializing_if = "Option::is_none")]
pub platform_fault_domain: Option<i32>,
#[serde(rename = "rdpThumbPrint", default, skip_serializing_if = "Option::is_none")]
pub rdp_thumb_print: Option<String>,
#[serde(rename = "vmAgent", default, skip_serializing_if = "Option::is_none")]
pub vm_agent: Option<VirtualMachineAgentInstanceView>,
#[serde(rename = "maintenanceRedeployStatus", default, skip_serializing_if = "Option::is_none")]
pub maintenance_redeploy_status: Option<MaintenanceRedeployStatus>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub disks: Vec<DiskInstanceView>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub extensions: Vec<VirtualMachineExtensionInstanceView>,
#[serde(rename = "vmHealth", default, skip_serializing_if = "Option::is_none")]
pub vm_health: Option<VirtualMachineHealthStatus>,
#[serde(rename = "bootDiagnostics", default, skip_serializing_if = "Option::is_none")]
pub boot_diagnostics: Option<BootDiagnosticsInstanceView>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub statuses: Vec<InstanceViewStatus>,
#[serde(rename = "assignedHost", default, skip_serializing_if = "Option::is_none")]
pub assigned_host: Option<String>,
#[serde(rename = "placementGroupId", default, skip_serializing_if = "Option::is_none")]
pub placement_group_id: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetVmNetworkProfileConfiguration {
#[serde(rename = "networkInterfaceConfigurations", default, skip_serializing_if = "Vec::is_empty")]
pub network_interface_configurations: Vec<VirtualMachineScaleSetNetworkConfiguration>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetVmProtectionPolicy {
#[serde(rename = "protectFromScaleIn", default, skip_serializing_if = "Option::is_none")]
pub protect_from_scale_in: Option<bool>,
#[serde(rename = "protectFromScaleSetActions", default, skip_serializing_if = "Option::is_none")]
pub protect_from_scale_set_actions: Option<bool>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ScheduledEventsProfile {
#[serde(rename = "terminateNotificationProfile", default, skip_serializing_if = "Option::is_none")]
pub terminate_notification_profile: Option<TerminateNotificationProfile>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TerminateNotificationProfile {
#[serde(rename = "notBeforeTimeout", default, skip_serializing_if = "Option::is_none")]
pub not_before_timeout: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub enable: Option<bool>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineHealthStatus {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub status: Option<InstanceViewStatus>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineScaleSetVmListResult {
pub value: Vec<VirtualMachineScaleSetVm>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RollingUpgradeStatusInfo {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<RollingUpgradeStatusInfoProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RollingUpgradeStatusInfoProperties {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub policy: Option<RollingUpgradePolicy>,
#[serde(rename = "runningStatus", default, skip_serializing_if = "Option::is_none")]
pub running_status: Option<RollingUpgradeRunningStatus>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub progress: Option<RollingUpgradeProgressInfo>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub error: Option<ApiError>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RollingUpgradeRunningStatus {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub code: Option<rolling_upgrade_running_status::Code>,
#[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")]
pub start_time: Option<String>,
#[serde(rename = "lastAction", default, skip_serializing_if = "Option::is_none")]
pub last_action: Option<rolling_upgrade_running_status::LastAction>,
#[serde(rename = "lastActionTime", default, skip_serializing_if = "Option::is_none")]
pub last_action_time: Option<String>,
}
pub mod rolling_upgrade_running_status {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Code {
RollingForward,
Cancelled,
Completed,
Faulted,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum LastAction {
Start,
Cancel,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RollingUpgradeProgressInfo {
#[serde(rename = "successfulInstanceCount", default, skip_serializing_if = "Option::is_none")]
pub successful_instance_count: Option<i32>,
#[serde(rename = "failedInstanceCount", default, skip_serializing_if = "Option::is_none")]
pub failed_instance_count: Option<i32>,
#[serde(rename = "inProgressInstanceCount", default, skip_serializing_if = "Option::is_none")]
pub in_progress_instance_count: Option<i32>,
#[serde(rename = "pendingInstanceCount", default, skip_serializing_if = "Option::is_none")]
pub pending_instance_count: Option<i32>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiErrorBase {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub code: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub target: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub message: Option<String>,
}
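/// Inner error details. The lowercase field names (`exceptiontype`,
/// `errordetail`) match the service wire format and are intentional.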
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct InnerError {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub exceptiontype: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub errordetail: Option<String>,
}
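/// An error response from the Compute service, wrapping an `ApiError`.
///
/// Illustrative sketch (error code and message are placeholders): parsing a
/// typical error body.
///
/// ```ignore
/// let cloud_err: CloudError = serde_json::from_value(serde_json::json!({
///     "error": { "code": "NotFound", "message": "Resource not found" }
/// }))
/// .unwrap();
/// assert_eq!(cloud_err.error.unwrap().code.as_deref(), Some("NotFound"));
/// ```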
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudError {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub error: Option<ApiError>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiError {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub details: Vec<ApiErrorBase>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub innererror: Option<InnerError>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub code: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub target: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub message: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ProxyResource {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
}
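/// The common ARM resource envelope (`id`, `name`, `type`, `location`, `tags`),
/// flattened into concrete tracked resources such as `VirtualMachineScaleSet`.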
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Resource {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub location: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tags: Option<serde_json::Value>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UpdateResource {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tags: Option<serde_json::Value>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SubResource {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SubResourceReadOnly {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ExtendedLocationType {
EdgeZone,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ExtendedLocation {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<ExtendedLocationType>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RecoveryWalkResponse {
#[serde(rename = "walkPerformed", default, skip_serializing_if = "Option::is_none")]
pub walk_performed: Option<bool>,
#[serde(rename = "nextPlatformUpdateDomain", default, skip_serializing_if = "Option::is_none")]
pub next_platform_update_domain: Option<i64>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RequestRateByIntervalInput {
#[serde(flatten)]
pub log_analytics_input_base: LogAnalyticsInputBase,
#[serde(rename = "intervalLength")]
pub interval_length: request_rate_by_interval_input::IntervalLength,
}
pub mod request_rate_by_interval_input {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum IntervalLength {
ThreeMins,
FiveMins,
ThirtyMins,
SixtyMins,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ThrottledRequestsInput {
#[serde(flatten)]
pub log_analytics_input_base: LogAnalyticsInputBase,
}
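/// Base input for the LogAnalytics APIs: `blobContainerSasUri`, `fromTime`,
/// and `toTime` are required, while the `groupBy*` flags are optional.
///
/// Illustrative sketch (the SAS URI and timestamps are placeholders):
///
/// ```ignore
/// let input = LogAnalyticsInputBase {
///     blob_container_sas_uri: "https://example.blob.core.windows.net/logs?sv=...".to_owned(),
///     from_time: "2021-01-01T00:00:00Z".to_owned(),
///     to_time: "2021-01-02T00:00:00Z".to_owned(),
///     group_by_throttle_policy: None,
///     group_by_operation_name: Some(true),
///     group_by_resource_name: None,
///     group_by_client_application_id: None,
///     group_by_user_agent: None,
/// };
/// ```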
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LogAnalyticsInputBase {
#[serde(rename = "blobContainerSasUri")]
pub blob_container_sas_uri: String,
#[serde(rename = "fromTime")]
pub from_time: String,
#[serde(rename = "toTime")]
pub to_time: String,
#[serde(rename = "groupByThrottlePolicy", default, skip_serializing_if = "Option::is_none")]
pub group_by_throttle_policy: Option<bool>,
#[serde(rename = "groupByOperationName", default, skip_serializing_if = "Option::is_none")]
pub group_by_operation_name: Option<bool>,
#[serde(rename = "groupByResourceName", default, skip_serializing_if = "Option::is_none")]
pub group_by_resource_name: Option<bool>,
#[serde(rename = "groupByClientApplicationId", default, skip_serializing_if = "Option::is_none")]
pub group_by_client_application_id: Option<bool>,
#[serde(rename = "groupByUserAgent", default, skip_serializing_if = "Option::is_none")]
pub group_by_user_agent: Option<bool>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LogAnalyticsOperationResult {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<LogAnalyticsOutput>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LogAnalyticsOutput {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub output: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VmScaleSetConvertToSinglePlacementGroupInput {
#[serde(rename = "activePlacementGroupId", default, skip_serializing_if = "Option::is_none")]
pub active_placement_group_id: Option<String>,
}
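/// The input for an orchestration service state change (resume or suspend).
///
/// Illustrative sketch: both required fields serialize under their camelCase
/// wire names, in declaration order.
///
/// ```ignore
/// use orchestration_service_state_input::{Action, ServiceName};
/// let input = OrchestrationServiceStateInput {
///     service_name: ServiceName::AutomaticRepairs,
///     action: Action::Suspend,
/// };
/// assert_eq!(
///     serde_json::to_string(&input).unwrap(),
///     r#"{"serviceName":"AutomaticRepairs","action":"Suspend"}"#
/// );
/// ```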
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OrchestrationServiceStateInput {
#[serde(rename = "serviceName")]
pub service_name: orchestration_service_state_input::ServiceName,
pub action: orchestration_service_state_input::Action,
}
pub mod orchestration_service_state_input {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ServiceName {
AutomaticRepairs,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Action {
Resume,
Suspend,
}
}
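/// Windows guest patching settings. Note the asymmetric casing of the
/// `AutomaticByOS` wire value, handled by a `serde(rename)` on the variant.
///
/// Illustrative sketch:
///
/// ```ignore
/// use patch_settings::PatchMode;
/// assert_eq!(
///     serde_json::to_string(&PatchMode::AutomaticByOs).unwrap(),
///     "\"AutomaticByOS\""
/// );
/// ```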
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PatchSettings {
#[serde(rename = "patchMode", default, skip_serializing_if = "Option::is_none")]
pub patch_mode: Option<patch_settings::PatchMode>,
#[serde(rename = "enableHotpatching", default, skip_serializing_if = "Option::is_none")]
pub enable_hotpatching: Option<bool>,
#[serde(rename = "assessmentMode", default, skip_serializing_if = "Option::is_none")]
pub assessment_mode: Option<patch_settings::AssessmentMode>,
}
pub mod patch_settings {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum PatchMode {
Manual,
#[serde(rename = "AutomaticByOS")]
AutomaticByOs,
AutomaticByPlatform,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum AssessmentMode {
ImageDefault,
AutomaticByPlatform,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LinuxPatchSettings {
#[serde(rename = "patchMode", default, skip_serializing_if = "Option::is_none")]
pub patch_mode: Option<linux_patch_settings::PatchMode>,
#[serde(rename = "assessmentMode", default, skip_serializing_if = "Option::is_none")]
pub assessment_mode: Option<linux_patch_settings::AssessmentMode>,
}
pub mod linux_patch_settings {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum PatchMode {
ImageDefault,
AutomaticByPlatform,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum AssessmentMode {
ImageDefault,
AutomaticByPlatform,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachinePatchStatus {
#[serde(rename = "availablePatchSummary", default, skip_serializing_if = "Option::is_none")]
pub available_patch_summary: Option<AvailablePatchSummary>,
#[serde(rename = "lastPatchInstallationSummary", default, skip_serializing_if = "Option::is_none")]
pub last_patch_installation_summary: Option<LastPatchInstallationSummary>,
#[serde(rename = "configurationStatuses", default, skip_serializing_if = "Vec::is_empty")]
pub configuration_statuses: Vec<InstanceViewStatus>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AvailablePatchSummary {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub status: Option<available_patch_summary::Status>,
#[serde(rename = "assessmentActivityId", default, skip_serializing_if = "Option::is_none")]
pub assessment_activity_id: Option<String>,
#[serde(rename = "rebootPending", default, skip_serializing_if = "Option::is_none")]
pub reboot_pending: Option<bool>,
#[serde(rename = "criticalAndSecurityPatchCount", default, skip_serializing_if = "Option::is_none")]
pub critical_and_security_patch_count: Option<i32>,
#[serde(rename = "otherPatchCount", default, skip_serializing_if = "Option::is_none")]
pub other_patch_count: Option<i32>,
#[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")]
pub start_time: Option<String>,
#[serde(rename = "lastModifiedTime", default, skip_serializing_if = "Option::is_none")]
pub last_modified_time: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub error: Option<ApiError>,
}
pub mod available_patch_summary {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Status {
Unknown,
InProgress,
Failed,
Succeeded,
CompletedWithWarnings,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LastPatchInstallationSummary {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub status: Option<last_patch_installation_summary::Status>,
#[serde(rename = "installationActivityId", default, skip_serializing_if = "Option::is_none")]
pub installation_activity_id: Option<String>,
#[serde(rename = "maintenanceWindowExceeded", default, skip_serializing_if = "Option::is_none")]
pub maintenance_window_exceeded: Option<bool>,
#[serde(rename = "notSelectedPatchCount", default, skip_serializing_if = "Option::is_none")]
pub not_selected_patch_count: Option<i32>,
#[serde(rename = "excludedPatchCount", default, skip_serializing_if = "Option::is_none")]
pub excluded_patch_count: Option<i32>,
#[serde(rename = "pendingPatchCount", default, skip_serializing_if = "Option::is_none")]
pub pending_patch_count: Option<i32>,
#[serde(rename = "installedPatchCount", default, skip_serializing_if = "Option::is_none")]
pub installed_patch_count: Option<i32>,
#[serde(rename = "failedPatchCount", default, skip_serializing_if = "Option::is_none")]
pub failed_patch_count: Option<i32>,
#[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")]
pub start_time: Option<String>,
#[serde(rename = "lastModifiedTime", default, skip_serializing_if = "Option::is_none")]
pub last_modified_time: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub error: Option<ApiError>,
}
pub mod last_patch_installation_summary {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Status {
Unknown,
InProgress,
Failed,
Succeeded,
CompletedWithWarnings,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RunCommandInputParameter {
pub name: String,
pub value: String,
}
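/// The input for a run command request. `commandId` is required; `script` and
/// `parameters` may be omitted.
///
/// Illustrative sketch (the command ID and script line are placeholders):
/// empty vectors are skipped during serialization.
///
/// ```ignore
/// let cmd = RunCommandInput {
///     command_id: "RunShellScript".to_owned(),
///     script: vec!["echo hello".to_owned()],
///     parameters: Vec::new(),
/// };
/// assert_eq!(
///     serde_json::to_string(&cmd).unwrap(),
///     r#"{"commandId":"RunShellScript","script":["echo hello"]}"#
/// );
/// ```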
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RunCommandInput {
#[serde(rename = "commandId")]
pub command_id: String,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub script: Vec<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub parameters: Vec<RunCommandInputParameter>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RunCommandParameterDefinition {
pub name: String,
#[serde(rename = "type")]
pub type_: String,
#[serde(rename = "defaultValue", default, skip_serializing_if = "Option::is_none")]
pub default_value: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub required: Option<bool>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RunCommandDocumentBase {
#[serde(rename = "$schema")]
pub schema: String,
pub id: String,
#[serde(rename = "osType")]
pub os_type: run_command_document_base::OsType,
pub label: String,
pub description: String,
}
pub mod run_command_document_base {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum OsType {
Windows,
Linux,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RunCommandDocument {
#[serde(flatten)]
pub run_command_document_base: RunCommandDocumentBase,
pub script: Vec<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub parameters: Vec<RunCommandParameterDefinition>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RunCommandListResult {
pub value: Vec<RunCommandDocumentBase>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RunCommandResult {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<InstanceViewStatus>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineRunCommandInstanceView {
#[serde(rename = "executionState", default, skip_serializing_if = "Option::is_none")]
pub execution_state: Option<virtual_machine_run_command_instance_view::ExecutionState>,
#[serde(rename = "executionMessage", default, skip_serializing_if = "Option::is_none")]
pub execution_message: Option<String>,
#[serde(rename = "exitCode", default, skip_serializing_if = "Option::is_none")]
pub exit_code: Option<i32>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub output: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub error: Option<String>,
#[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")]
pub start_time: Option<String>,
#[serde(rename = "endTime", default, skip_serializing_if = "Option::is_none")]
pub end_time: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub statuses: Vec<InstanceViewStatus>,
}
pub mod virtual_machine_run_command_instance_view {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ExecutionState {
Unknown,
Pending,
Running,
Failed,
Succeeded,
TimedOut,
Canceled,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineRunCommandScriptSource {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub script: Option<String>,
#[serde(rename = "scriptUri", default, skip_serializing_if = "Option::is_none")]
pub script_uri: Option<String>,
#[serde(rename = "commandId", default, skip_serializing_if = "Option::is_none")]
pub command_id: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineRunCommandProperties {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub source: Option<VirtualMachineRunCommandScriptSource>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub parameters: Vec<RunCommandInputParameter>,
#[serde(rename = "protectedParameters", default, skip_serializing_if = "Vec::is_empty")]
pub protected_parameters: Vec<RunCommandInputParameter>,
#[serde(rename = "asyncExecution", default, skip_serializing_if = "Option::is_none")]
pub async_execution: Option<bool>,
#[serde(rename = "runAsUser", default, skip_serializing_if = "Option::is_none")]
pub run_as_user: Option<String>,
#[serde(rename = "runAsPassword", default, skip_serializing_if = "Option::is_none")]
pub run_as_password: Option<String>,
#[serde(rename = "timeoutInSeconds", default, skip_serializing_if = "Option::is_none")]
pub timeout_in_seconds: Option<i32>,
#[serde(rename = "outputBlobUri", default, skip_serializing_if = "Option::is_none")]
pub output_blob_uri: Option<String>,
#[serde(rename = "errorBlobUri", default, skip_serializing_if = "Option::is_none")]
pub error_blob_uri: Option<String>,
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<String>,
#[serde(rename = "instanceView", default, skip_serializing_if = "Option::is_none")]
pub instance_view: Option<VirtualMachineRunCommandInstanceView>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineRunCommand {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<VirtualMachineRunCommandProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineRunCommandUpdate {
#[serde(flatten)]
pub update_resource: UpdateResource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<VirtualMachineRunCommandProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualMachineRunCommandsListResult {
pub value: Vec<VirtualMachineRunCommand>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceSkuCapacity {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub minimum: Option<i64>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub maximum: Option<i64>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub default: Option<i64>,
#[serde(rename = "scaleType", default, skip_serializing_if = "Option::is_none")]
pub scale_type: Option<resource_sku_capacity::ScaleType>,
}
pub mod resource_sku_capacity {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ScaleType {
Automatic,
Manual,
None,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceSkuCosts {
#[serde(rename = "meterID", default, skip_serializing_if = "Option::is_none")]
pub meter_id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub quantity: Option<i64>,
#[serde(rename = "extendedUnit", default, skip_serializing_if = "Option::is_none")]
pub extended_unit: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceSkuCapabilities {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub value: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceSkuZoneDetails {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub name: Vec<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub capabilities: Vec<ResourceSkuCapabilities>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceSkuRestrictions {
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<resource_sku_restrictions::Type>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub values: Vec<String>,
#[serde(rename = "restrictionInfo", default, skip_serializing_if = "Option::is_none")]
pub restriction_info: Option<ResourceSkuRestrictionInfo>,
#[serde(rename = "reasonCode", default, skip_serializing_if = "Option::is_none")]
pub reason_code: Option<resource_sku_restrictions::ReasonCode>,
}
pub mod resource_sku_restrictions {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Type {
Location,
Zone,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ReasonCode {
QuotaId,
NotAvailableForSubscription,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceSku {
#[serde(rename = "resourceType", default, skip_serializing_if = "Option::is_none")]
pub resource_type: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tier: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub size: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub family: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub kind: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub capacity: Option<ResourceSkuCapacity>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub locations: Vec<String>,
#[serde(rename = "locationInfo", default, skip_serializing_if = "Vec::is_empty")]
pub location_info: Vec<ResourceSkuLocationInfo>,
#[serde(rename = "apiVersions", default, skip_serializing_if = "Vec::is_empty")]
pub api_versions: Vec<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub costs: Vec<ResourceSkuCosts>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub capabilities: Vec<ResourceSkuCapabilities>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub restrictions: Vec<ResourceSkuRestrictions>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceSkuLocationInfo {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub location: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub zones: Vec<String>,
#[serde(rename = "zoneDetails", default, skip_serializing_if = "Vec::is_empty")]
pub zone_details: Vec<ResourceSkuZoneDetails>,
#[serde(rename = "extendedLocations", default, skip_serializing_if = "Vec::is_empty")]
pub extended_locations: Vec<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<resource_sku_location_info::Type>,
}
pub mod resource_sku_location_info {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Type {
EdgeZone,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceSkuRestrictionInfo {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub locations: Vec<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub zones: Vec<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceSkusResult {
pub value: Vec<ResourceSku>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ProxyOnlyResource {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
}
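/// A managed disk resource. The flattened `resource` carries the ARM
/// envelope (id, name, location, tags, ...), so it serializes at the top
/// level alongside the disk-specific fields below.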
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Disk {
#[serde(flatten)]
pub resource: Resource,
#[serde(rename = "managedBy", default, skip_serializing_if = "Option::is_none")]
pub managed_by: Option<String>,
#[serde(rename = "managedByExtended", default, skip_serializing_if = "Vec::is_empty")]
pub managed_by_extended: Vec<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub sku: Option<DiskSku>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub zones: Vec<String>,
#[serde(rename = "extendedLocation", default, skip_serializing_if = "Option::is_none")]
pub extended_location: Option<ExtendedLocation>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<DiskProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DiskUpdate {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<DiskUpdateProperties>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tags: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub sku: Option<DiskSku>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DiskList {
pub value: Vec<Disk>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
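/// SKU of a managed disk. The `Name` variants map to their wire values via
/// `#[serde(rename = ...)]`. A minimal construction sketch (the values are
/// illustrative, not required):
///
/// ```ignore
/// let sku = DiskSku {
///     name: Some(disk_sku::Name::PremiumLrs), // serializes as "Premium_LRS"
///     tier: Some("Premium".to_string()),
/// };
/// ```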
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DiskSku {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<disk_sku::Name>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tier: Option<String>,
}
pub mod disk_sku {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Name {
#[serde(rename = "Standard_LRS")]
StandardLrs,
#[serde(rename = "Premium_LRS")]
PremiumLrs,
#[serde(rename = "StandardSSD_LRS")]
StandardSsdLrs,
#[serde(rename = "UltraSSD_LRS")]
UltraSsdLrs,
#[serde(rename = "Premium_ZRS")]
PremiumZrs,
#[serde(rename = "StandardSSD_ZRS")]
StandardSsdZrs,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SnapshotSku {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<snapshot_sku::Name>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tier: Option<String>,
}
pub mod snapshot_sku {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Name {
#[serde(rename = "Standard_LRS")]
StandardLrs,
#[serde(rename = "Premium_LRS")]
PremiumLrs,
#[serde(rename = "Standard_ZRS")]
StandardZrs,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DiskProperties {
#[serde(rename = "timeCreated", default, skip_serializing_if = "Option::is_none")]
pub time_created: Option<String>,
#[serde(rename = "osType", default, skip_serializing_if = "Option::is_none")]
pub os_type: Option<disk_properties::OsType>,
#[serde(rename = "hyperVGeneration", default, skip_serializing_if = "Option::is_none")]
pub hyper_v_generation: Option<disk_properties::HyperVGeneration>,
#[serde(rename = "purchasePlan", default, skip_serializing_if = "Option::is_none")]
pub purchase_plan: Option<PurchasePlan>,
#[serde(rename = "supportedCapabilities", default, skip_serializing_if = "Option::is_none")]
pub supported_capabilities: Option<SupportedCapabilities>,
#[serde(rename = "creationData")]
pub creation_data: CreationData,
#[serde(rename = "diskSizeGB", default, skip_serializing_if = "Option::is_none")]
pub disk_size_gb: Option<i32>,
#[serde(rename = "diskSizeBytes", default, skip_serializing_if = "Option::is_none")]
pub disk_size_bytes: Option<i64>,
#[serde(rename = "uniqueId", default, skip_serializing_if = "Option::is_none")]
pub unique_id: Option<String>,
#[serde(rename = "encryptionSettingsCollection", default, skip_serializing_if = "Option::is_none")]
pub encryption_settings_collection: Option<EncryptionSettingsCollection>,
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<String>,
#[serde(rename = "diskIOPSReadWrite", default, skip_serializing_if = "Option::is_none")]
pub disk_iops_read_write: Option<i64>,
#[serde(rename = "diskMBpsReadWrite", default, skip_serializing_if = "Option::is_none")]
pub disk_mbps_read_write: Option<i64>,
#[serde(rename = "diskIOPSReadOnly", default, skip_serializing_if = "Option::is_none")]
pub disk_iops_read_only: Option<i64>,
#[serde(rename = "diskMBpsReadOnly", default, skip_serializing_if = "Option::is_none")]
pub disk_mbps_read_only: Option<i64>,
#[serde(rename = "diskState", default, skip_serializing_if = "Option::is_none")]
pub disk_state: Option<DiskState>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub encryption: Option<Encryption>,
#[serde(rename = "maxShares", default, skip_serializing_if = "Option::is_none")]
pub max_shares: Option<i32>,
#[serde(rename = "shareInfo", default, skip_serializing_if = "Vec::is_empty")]
pub share_info: Vec<ShareInfoElement>,
#[serde(rename = "networkAccessPolicy", default, skip_serializing_if = "Option::is_none")]
pub network_access_policy: Option<NetworkAccessPolicy>,
#[serde(rename = "diskAccessId", default, skip_serializing_if = "Option::is_none")]
pub disk_access_id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tier: Option<String>,
#[serde(rename = "burstingEnabled", default, skip_serializing_if = "Option::is_none")]
pub bursting_enabled: Option<bool>,
#[serde(rename = "propertyUpdatesInProgress", default, skip_serializing_if = "Option::is_none")]
pub property_updates_in_progress: Option<PropertyUpdatesInProgress>,
#[serde(rename = "supportsHibernation", default, skip_serializing_if = "Option::is_none")]
pub supports_hibernation: Option<bool>,
#[serde(rename = "securityProfile", default, skip_serializing_if = "Option::is_none")]
pub security_profile: Option<DiskSecurityProfile>,
#[serde(rename = "completionPercent", default, skip_serializing_if = "Option::is_none")]
pub completion_percent: Option<f64>,
#[serde(rename = "publicNetworkAccess", default, skip_serializing_if = "Option::is_none")]
pub public_network_access: Option<PublicNetworkAccess>,
}
pub mod disk_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum OsType {
Windows,
Linux,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum HyperVGeneration {
V1,
V2,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SnapshotProperties {
#[serde(rename = "timeCreated", default, skip_serializing_if = "Option::is_none")]
pub time_created: Option<String>,
#[serde(rename = "osType", default, skip_serializing_if = "Option::is_none")]
pub os_type: Option<snapshot_properties::OsType>,
#[serde(rename = "hyperVGeneration", default, skip_serializing_if = "Option::is_none")]
pub hyper_v_generation: Option<snapshot_properties::HyperVGeneration>,
#[serde(rename = "purchasePlan", default, skip_serializing_if = "Option::is_none")]
pub purchase_plan: Option<PurchasePlan>,
#[serde(rename = "supportedCapabilities", default, skip_serializing_if = "Option::is_none")]
pub supported_capabilities: Option<SupportedCapabilities>,
#[serde(rename = "creationData")]
pub creation_data: CreationData,
#[serde(rename = "diskSizeGB", default, skip_serializing_if = "Option::is_none")]
pub disk_size_gb: Option<i32>,
#[serde(rename = "diskSizeBytes", default, skip_serializing_if = "Option::is_none")]
pub disk_size_bytes: Option<i64>,
#[serde(rename = "diskState", default, skip_serializing_if = "Option::is_none")]
pub disk_state: Option<DiskState>,
#[serde(rename = "uniqueId", default, skip_serializing_if = "Option::is_none")]
pub unique_id: Option<String>,
#[serde(rename = "encryptionSettingsCollection", default, skip_serializing_if = "Option::is_none")]
pub encryption_settings_collection: Option<EncryptionSettingsCollection>,
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub incremental: Option<bool>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub encryption: Option<Encryption>,
#[serde(rename = "networkAccessPolicy", default, skip_serializing_if = "Option::is_none")]
pub network_access_policy: Option<NetworkAccessPolicy>,
#[serde(rename = "diskAccessId", default, skip_serializing_if = "Option::is_none")]
pub disk_access_id: Option<String>,
#[serde(rename = "supportsHibernation", default, skip_serializing_if = "Option::is_none")]
pub supports_hibernation: Option<bool>,
#[serde(rename = "publicNetworkAccess", default, skip_serializing_if = "Option::is_none")]
pub public_network_access: Option<PublicNetworkAccess>,
#[serde(rename = "completionPercent", default, skip_serializing_if = "Option::is_none")]
pub completion_percent: Option<f64>,
}
pub mod snapshot_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum OsType {
Windows,
Linux,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum HyperVGeneration {
V1,
V2,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ShareInfoElement {
#[serde(rename = "vmUri", default, skip_serializing_if = "Option::is_none")]
pub vm_uri: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EncryptionSetProperties {
#[serde(rename = "encryptionType", default, skip_serializing_if = "Option::is_none")]
pub encryption_type: Option<DiskEncryptionSetType>,
#[serde(rename = "activeKey", default, skip_serializing_if = "Option::is_none")]
pub active_key: Option<KeyForDiskEncryptionSet>,
#[serde(rename = "previousKeys", default, skip_serializing_if = "Vec::is_empty")]
pub previous_keys: Vec<KeyForDiskEncryptionSet>,
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<String>,
#[serde(rename = "rotationToLatestKeyVersionEnabled", default, skip_serializing_if = "Option::is_none")]
pub rotation_to_latest_key_version_enabled: Option<bool>,
#[serde(rename = "lastKeyRotationTimestamp", default, skip_serializing_if = "Option::is_none")]
pub last_key_rotation_timestamp: Option<String>,
#[serde(rename = "autoKeyRotationError", default, skip_serializing_if = "Option::is_none")]
pub auto_key_rotation_error: Option<ApiError>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EncryptionSettingsCollection {
pub enabled: bool,
#[serde(rename = "encryptionSettings", default, skip_serializing_if = "Vec::is_empty")]
pub encryption_settings: Vec<EncryptionSettingsElement>,
#[serde(rename = "encryptionSettingsVersion", default, skip_serializing_if = "Option::is_none")]
pub encryption_settings_version: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EncryptionSettingsElement {
#[serde(rename = "diskEncryptionKey", default, skip_serializing_if = "Option::is_none")]
pub disk_encryption_key: Option<KeyVaultAndSecretReference>,
#[serde(rename = "keyEncryptionKey", default, skip_serializing_if = "Option::is_none")]
pub key_encryption_key: Option<KeyVaultAndKeyReference>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct KeyVaultAndSecretReference {
#[serde(rename = "sourceVault")]
pub source_vault: SourceVault,
#[serde(rename = "secretUrl")]
pub secret_url: String,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct KeyVaultAndKeyReference {
#[serde(rename = "sourceVault")]
pub source_vault: SourceVault,
#[serde(rename = "keyUrl")]
pub key_url: String,
}
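/// Key Vault key used by a disk encryption set. `key_url` is mandatory;
/// `source_vault` is optional. A sketch with hypothetical placeholder
/// values:
///
/// ```ignore
/// let active_key = KeyForDiskEncryptionSet {
///     source_vault: Some(SourceVault {
///         id: Some("/subscriptions/<sub>/.../vaults/<vault>".to_string()),
///     }),
///     key_url: "https://<vault>.vault.azure.net/keys/<key>/<version>".to_string(),
/// };
/// ```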
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct KeyForDiskEncryptionSet {
#[serde(rename = "sourceVault", default, skip_serializing_if = "Option::is_none")]
pub source_vault: Option<SourceVault>,
#[serde(rename = "keyUrl")]
pub key_url: String,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SourceVault {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum EncryptionType {
EncryptionAtRestWithPlatformKey,
EncryptionAtRestWithCustomerKey,
EncryptionAtRestWithPlatformAndCustomerKeys,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum DiskEncryptionSetType {
EncryptionAtRestWithCustomerKey,
EncryptionAtRestWithPlatformAndCustomerKeys,
}
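/// At-rest encryption settings for a disk or snapshot. With
/// `EncryptionAtRestWithPlatformKey` no disk encryption set is needed; the
/// customer-key variants expect `disk_encryption_set_id` to point at a
/// `DiskEncryptionSet` resource. A minimal sketch:
///
/// ```ignore
/// let encryption = Encryption {
///     disk_encryption_set_id: None,
///     type_: Some(EncryptionType::EncryptionAtRestWithPlatformKey),
/// };
/// ```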
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Encryption {
#[serde(rename = "diskEncryptionSetId", default, skip_serializing_if = "Option::is_none")]
pub disk_encryption_set_id: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<EncryptionType>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum NetworkAccessPolicy {
AllowAll,
AllowPrivate,
DenyAll,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum PublicNetworkAccess {
Enabled,
Disabled,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DiskUpdateProperties {
#[serde(rename = "osType", default, skip_serializing_if = "Option::is_none")]
pub os_type: Option<disk_update_properties::OsType>,
#[serde(rename = "diskSizeGB", default, skip_serializing_if = "Option::is_none")]
pub disk_size_gb: Option<i32>,
#[serde(rename = "encryptionSettingsCollection", default, skip_serializing_if = "Option::is_none")]
pub encryption_settings_collection: Option<EncryptionSettingsCollection>,
#[serde(rename = "diskIOPSReadWrite", default, skip_serializing_if = "Option::is_none")]
pub disk_iops_read_write: Option<i64>,
#[serde(rename = "diskMBpsReadWrite", default, skip_serializing_if = "Option::is_none")]
pub disk_mbps_read_write: Option<i64>,
#[serde(rename = "diskIOPSReadOnly", default, skip_serializing_if = "Option::is_none")]
pub disk_iops_read_only: Option<i64>,
#[serde(rename = "diskMBpsReadOnly", default, skip_serializing_if = "Option::is_none")]
pub disk_mbps_read_only: Option<i64>,
#[serde(rename = "maxShares", default, skip_serializing_if = "Option::is_none")]
pub max_shares: Option<i32>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub encryption: Option<Encryption>,
#[serde(rename = "networkAccessPolicy", default, skip_serializing_if = "Option::is_none")]
pub network_access_policy: Option<NetworkAccessPolicy>,
#[serde(rename = "diskAccessId", default, skip_serializing_if = "Option::is_none")]
pub disk_access_id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tier: Option<String>,
#[serde(rename = "burstingEnabled", default, skip_serializing_if = "Option::is_none")]
pub bursting_enabled: Option<bool>,
#[serde(rename = "purchasePlan", default, skip_serializing_if = "Option::is_none")]
pub purchase_plan: Option<PurchasePlan>,
#[serde(rename = "supportedCapabilities", default, skip_serializing_if = "Option::is_none")]
pub supported_capabilities: Option<SupportedCapabilities>,
#[serde(rename = "propertyUpdatesInProgress", default, skip_serializing_if = "Option::is_none")]
pub property_updates_in_progress: Option<PropertyUpdatesInProgress>,
#[serde(rename = "supportsHibernation", default, skip_serializing_if = "Option::is_none")]
pub supports_hibernation: Option<bool>,
#[serde(rename = "publicNetworkAccess", default, skip_serializing_if = "Option::is_none")]
pub public_network_access: Option<PublicNetworkAccess>,
}
pub mod disk_update_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum OsType {
Windows,
Linux,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SnapshotUpdateProperties {
#[serde(rename = "osType", default, skip_serializing_if = "Option::is_none")]
pub os_type: Option<snapshot_update_properties::OsType>,
#[serde(rename = "diskSizeGB", default, skip_serializing_if = "Option::is_none")]
pub disk_size_gb: Option<i32>,
#[serde(rename = "encryptionSettingsCollection", default, skip_serializing_if = "Option::is_none")]
pub encryption_settings_collection: Option<EncryptionSettingsCollection>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub encryption: Option<Encryption>,
#[serde(rename = "networkAccessPolicy", default, skip_serializing_if = "Option::is_none")]
pub network_access_policy: Option<NetworkAccessPolicy>,
#[serde(rename = "diskAccessId", default, skip_serializing_if = "Option::is_none")]
pub disk_access_id: Option<String>,
#[serde(rename = "supportsHibernation", default, skip_serializing_if = "Option::is_none")]
pub supports_hibernation: Option<bool>,
#[serde(rename = "publicNetworkAccess", default, skip_serializing_if = "Option::is_none")]
pub public_network_access: Option<PublicNetworkAccess>,
}
pub mod snapshot_update_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum OsType {
Windows,
Linux,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DiskEncryptionSetUpdateProperties {
#[serde(rename = "encryptionType", default, skip_serializing_if = "Option::is_none")]
pub encryption_type: Option<DiskEncryptionSetType>,
#[serde(rename = "activeKey", default, skip_serializing_if = "Option::is_none")]
pub active_key: Option<KeyForDiskEncryptionSet>,
#[serde(rename = "rotationToLatestKeyVersionEnabled", default, skip_serializing_if = "Option::is_none")]
pub rotation_to_latest_key_version_enabled: Option<bool>,
}
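/// Lifecycle state of a managed disk. Variants with non-standard casing
/// (e.g. "ActiveSAS") keep their wire names through `#[serde(rename)]`.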
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum DiskState {
Unattached,
Attached,
Reserved,
Frozen,
#[serde(rename = "ActiveSAS")]
ActiveSas,
#[serde(rename = "ActiveSASFrozen")]
ActiveSasFrozen,
ReadyToUpload,
ActiveUpload,
}
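/// Source information for disk creation. Only `create_option` is required;
/// the remaining fields qualify particular options (e.g. `source_uri` for
/// `Import`, `image_reference` for `FromImage`). A sketch for an empty
/// disk, with every optional field left unset:
///
/// ```ignore
/// let creation_data = CreationData {
///     create_option: creation_data::CreateOption::Empty,
///     storage_account_id: None,
///     image_reference: None,
///     gallery_image_reference: None,
///     source_uri: None,
///     source_resource_id: None,
///     source_unique_id: None,
///     upload_size_bytes: None,
///     logical_sector_size: None,
/// };
/// ```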
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CreationData {
#[serde(rename = "createOption")]
pub create_option: creation_data::CreateOption,
#[serde(rename = "storageAccountId", default, skip_serializing_if = "Option::is_none")]
pub storage_account_id: Option<String>,
#[serde(rename = "imageReference", default, skip_serializing_if = "Option::is_none")]
pub image_reference: Option<ImageDiskReference>,
#[serde(rename = "galleryImageReference", default, skip_serializing_if = "Option::is_none")]
pub gallery_image_reference: Option<ImageDiskReference>,
#[serde(rename = "sourceUri", default, skip_serializing_if = "Option::is_none")]
pub source_uri: Option<String>,
#[serde(rename = "sourceResourceId", default, skip_serializing_if = "Option::is_none")]
pub source_resource_id: Option<String>,
#[serde(rename = "sourceUniqueId", default, skip_serializing_if = "Option::is_none")]
pub source_unique_id: Option<String>,
#[serde(rename = "uploadSizeBytes", default, skip_serializing_if = "Option::is_none")]
pub upload_size_bytes: Option<i64>,
#[serde(rename = "logicalSectorSize", default, skip_serializing_if = "Option::is_none")]
pub logical_sector_size: Option<i32>,
}
pub mod creation_data {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum CreateOption {
Empty,
Attach,
FromImage,
Import,
Copy,
Restore,
Upload,
CopyStart,
}
}
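/// Reference to a disk inside an image or gallery image version; `lun`
/// selects a data disk and is left `None` for the OS disk. A sketch with a
/// hypothetical resource id:
///
/// ```ignore
/// let image_ref = ImageDiskReference {
///     id: "/subscriptions/<sub>/.../images/<image>".to_string(),
///     lun: None, // OS disk
/// };
/// ```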
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ImageDiskReference {
pub id: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub lun: Option<i32>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SupportedCapabilities {
#[serde(rename = "acceleratedNetwork", default, skip_serializing_if = "Option::is_none")]
pub accelerated_network: Option<bool>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PropertyUpdatesInProgress {
#[serde(rename = "targetTier", default, skip_serializing_if = "Option::is_none")]
pub target_tier: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum DiskSecurityType {
TrustedLaunch,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DiskSecurityProfile {
#[serde(rename = "securityType", default, skip_serializing_if = "Option::is_none")]
pub security_type: Option<DiskSecurityType>,
}
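/// Payload for granting export (SAS) access to a disk or snapshot; both
/// fields are required. A minimal sketch (the duration is illustrative):
///
/// ```ignore
/// let grant = GrantAccessData {
///     access: grant_access_data::Access::Read,
///     duration_in_seconds: 3600, // one hour
/// };
/// ```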
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GrantAccessData {
pub access: grant_access_data::Access,
#[serde(rename = "durationInSeconds")]
pub duration_in_seconds: i32,
}
pub mod grant_access_data {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Access {
None,
Read,
Write,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AccessUri {
#[serde(rename = "accessSAS", default, skip_serializing_if = "Option::is_none")]
pub access_sas: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Snapshot {
#[serde(flatten)]
pub resource: Resource,
#[serde(rename = "managedBy", default, skip_serializing_if = "Option::is_none")]
pub managed_by: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub sku: Option<SnapshotSku>,
#[serde(rename = "extendedLocation", default, skip_serializing_if = "Option::is_none")]
pub extended_location: Option<ExtendedLocation>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<SnapshotProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SnapshotUpdate {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<SnapshotUpdateProperties>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tags: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub sku: Option<SnapshotSku>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SnapshotList {
pub value: Vec<Snapshot>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EncryptionSetIdentity {
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<encryption_set_identity::Type>,
#[serde(rename = "principalId", default, skip_serializing_if = "Option::is_none")]
pub principal_id: Option<String>,
#[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")]
pub tenant_id: Option<String>,
}
pub mod encryption_set_identity {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Type {
SystemAssigned,
None,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DiskEncryptionSet {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub identity: Option<EncryptionSetIdentity>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<EncryptionSetProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DiskEncryptionSetUpdate {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<DiskEncryptionSetUpdateProperties>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tags: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub identity: Option<EncryptionSetIdentity>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DiskEncryptionSetList {
pub value: Vec<DiskEncryptionSet>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceUriList {
pub value: Vec<String>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateEndpoint {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum PrivateEndpointServiceConnectionStatus {
Pending,
Approved,
Rejected,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum PrivateEndpointConnectionProvisioningState {
Succeeded,
Creating,
Deleting,
Failed,
}
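/// Approval state of a private endpoint connection. A sketch approving a
/// connection (the description is illustrative):
///
/// ```ignore
/// let state = PrivateLinkServiceConnectionState {
///     status: Some(PrivateEndpointServiceConnectionStatus::Approved),
///     description: Some("Approved by the disk-access owner".to_string()),
///     actions_required: None,
/// };
/// ```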
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkServiceConnectionState {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub status: Option<PrivateEndpointServiceConnectionStatus>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(rename = "actionsRequired", default, skip_serializing_if = "Option::is_none")]
pub actions_required: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateEndpointConnectionProperties {
#[serde(rename = "privateEndpoint", default, skip_serializing_if = "Option::is_none")]
pub private_endpoint: Option<PrivateEndpoint>,
#[serde(rename = "privateLinkServiceConnectionState")]
pub private_link_service_connection_state: PrivateLinkServiceConnectionState,
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<PrivateEndpointConnectionProvisioningState>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateEndpointConnection {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<PrivateEndpointConnectionProperties>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DiskAccessProperties {
#[serde(rename = "privateEndpointConnections", default, skip_serializing_if = "Vec::is_empty")]
pub private_endpoint_connections: Vec<PrivateEndpointConnection>,
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<String>,
#[serde(rename = "timeCreated", default, skip_serializing_if = "Option::is_none")]
pub time_created: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DiskAccess {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<DiskAccessProperties>,
#[serde(rename = "extendedLocation", default, skip_serializing_if = "Option::is_none")]
pub extended_location: Option<ExtendedLocation>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DiskAccessList {
pub value: Vec<DiskAccess>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DiskAccessUpdate {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tags: Option<serde_json::Value>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkResource {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<PrivateLinkResourceProperties>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkResourceProperties {
#[serde(rename = "groupId", default, skip_serializing_if = "Option::is_none")]
pub group_id: Option<String>,
#[serde(rename = "requiredMembers", default, skip_serializing_if = "Vec::is_empty")]
pub required_members: Vec<String>,
#[serde(rename = "requiredZoneNames", default, skip_serializing_if = "Vec::is_empty")]
pub required_zone_names: Vec<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DiskRestorePoint {
#[serde(flatten)]
pub proxy_only_resource: ProxyOnlyResource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<DiskRestorePointProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DiskRestorePointList {
pub value: Vec<DiskRestorePoint>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DiskRestorePointProperties {
#[serde(rename = "timeCreated", default, skip_serializing_if = "Option::is_none")]
pub time_created: Option<String>,
#[serde(rename = "sourceResourceId", default, skip_serializing_if = "Option::is_none")]
pub source_resource_id: Option<String>,
#[serde(rename = "osType", default, skip_serializing_if = "Option::is_none")]
pub os_type: Option<disk_restore_point_properties::OsType>,
#[serde(rename = "hyperVGeneration", default, skip_serializing_if = "Option::is_none")]
pub hyper_v_generation: Option<disk_restore_point_properties::HyperVGeneration>,
#[serde(rename = "purchasePlan", default, skip_serializing_if = "Option::is_none")]
pub purchase_plan: Option<PurchasePlan>,
#[serde(rename = "supportedCapabilities", default, skip_serializing_if = "Option::is_none")]
pub supported_capabilities: Option<SupportedCapabilities>,
#[serde(rename = "familyId", default, skip_serializing_if = "Option::is_none")]
pub family_id: Option<String>,
#[serde(rename = "sourceUniqueId", default, skip_serializing_if = "Option::is_none")]
pub source_unique_id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub encryption: Option<Encryption>,
#[serde(rename = "supportsHibernation", default, skip_serializing_if = "Option::is_none")]
pub supports_hibernation: Option<bool>,
#[serde(rename = "networkAccessPolicy", default, skip_serializing_if = "Option::is_none")]
pub network_access_policy: Option<NetworkAccessPolicy>,
#[serde(rename = "publicNetworkAccess", default, skip_serializing_if = "Option::is_none")]
pub public_network_access: Option<PublicNetworkAccess>,
#[serde(rename = "diskAccessId", default, skip_serializing_if = "Option::is_none")]
pub disk_access_id: Option<String>,
#[serde(rename = "completionPercent", default, skip_serializing_if = "Option::is_none")]
pub completion_percent: Option<f64>,
}
pub mod disk_restore_point_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum OsType {
Windows,
Linux,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum HyperVGeneration {
V1,
V2,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkResourceListResult {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<PrivateLinkResource>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateEndpointConnectionListResult {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<PrivateEndpointConnection>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Gallery {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<GalleryProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GalleryUpdate {
#[serde(flatten)]
pub update_resource_definition: UpdateResourceDefinition,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<GalleryProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GalleryProperties {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub identifier: Option<GalleryIdentifier>,
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<gallery_properties::ProvisioningState>,
#[serde(rename = "sharingProfile", default, skip_serializing_if = "Option::is_none")]
pub sharing_profile: Option<SharingProfile>,
#[serde(rename = "softDeletePolicy", default, skip_serializing_if = "Option::is_none")]
pub soft_delete_policy: Option<SoftDeletePolicy>,
}
pub mod gallery_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ProvisioningState {
Creating,
Updating,
Failed,
Succeeded,
Deleting,
Migrating,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GalleryIdentifier {
#[serde(rename = "uniqueName", default, skip_serializing_if = "Option::is_none")]
pub unique_name: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SharingProfile {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub permissions: Option<sharing_profile::Permissions>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub groups: Vec<SharingProfileGroup>,
}
pub mod sharing_profile {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Permissions {
Private,
Groups,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SharingProfileGroup {
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<sharing_profile_group::Type>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub ids: Vec<String>,
}
pub mod sharing_profile_group {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Type {
Subscriptions,
#[serde(rename = "AADTenants")]
AadTenants,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SoftDeletePolicy {
#[serde(rename = "isSoftDeleteEnabled", default, skip_serializing_if = "Option::is_none")]
pub is_soft_delete_enabled: Option<bool>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GalleryApplication {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<GalleryApplicationProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GalleryApplicationUpdate {
#[serde(flatten)]
pub update_resource_definition: UpdateResourceDefinition,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<GalleryApplicationProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GalleryApplicationProperties {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub eula: Option<String>,
#[serde(rename = "privacyStatementUri", default, skip_serializing_if = "Option::is_none")]
pub privacy_statement_uri: Option<String>,
#[serde(rename = "releaseNoteUri", default, skip_serializing_if = "Option::is_none")]
pub release_note_uri: Option<String>,
#[serde(rename = "endOfLifeDate", default, skip_serializing_if = "Option::is_none")]
pub end_of_life_date: Option<String>,
#[serde(rename = "supportedOSType")]
pub supported_os_type: gallery_application_properties::SupportedOsType,
}
pub mod gallery_application_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum SupportedOsType {
Windows,
Linux,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GalleryApplicationVersion {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<GalleryApplicationVersionProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GalleryApplicationVersionUpdate {
#[serde(flatten)]
pub update_resource_definition: UpdateResourceDefinition,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<GalleryApplicationVersionProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GalleryApplicationVersionProperties {
#[serde(rename = "publishingProfile")]
pub publishing_profile: GalleryApplicationVersionPublishingProfile,
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<gallery_application_version_properties::ProvisioningState>,
#[serde(rename = "replicationStatus", default, skip_serializing_if = "Option::is_none")]
pub replication_status: Option<ReplicationStatus>,
}
pub mod gallery_application_version_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ProvisioningState {
Creating,
Updating,
Failed,
Succeeded,
Deleting,
Migrating,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GalleryApplicationVersionPublishingProfile {
#[serde(flatten)]
pub gallery_artifact_publishing_profile_base: GalleryArtifactPublishingProfileBase,
pub source: UserArtifactSource,
#[serde(rename = "manageActions", default, skip_serializing_if = "Option::is_none")]
pub manage_actions: Option<UserArtifactManage>,
#[serde(rename = "enableHealthCheck", default, skip_serializing_if = "Option::is_none")]
pub enable_health_check: Option<bool>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UserArtifactSource {
#[serde(rename = "mediaLink")]
pub media_link: String,
#[serde(rename = "defaultConfigurationLink", default, skip_serializing_if = "Option::is_none")]
pub default_configuration_link: Option<String>,
}
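/// Install/remove/update commands for a gallery application version;
/// `install` and `remove` are required, and when `update` is absent the
/// update operation falls back to remove-then-install. A sketch with
/// hypothetical commands:
///
/// ```ignore
/// let manage = UserArtifactManage {
///     install: "powershell -File install.ps1".to_string(),
///     remove: "powershell -File remove.ps1".to_string(),
///     update: None,
/// };
/// ```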
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UserArtifactManage {
pub install: String,
pub remove: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub update: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GalleryImage {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<GalleryImageProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GalleryImageUpdate {
#[serde(flatten)]
pub update_resource_definition: UpdateResourceDefinition,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<GalleryImageProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GalleryImageProperties {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub eula: Option<String>,
#[serde(rename = "privacyStatementUri", default, skip_serializing_if = "Option::is_none")]
pub privacy_statement_uri: Option<String>,
#[serde(rename = "releaseNoteUri", default, skip_serializing_if = "Option::is_none")]
pub release_note_uri: Option<String>,
#[serde(rename = "osType")]
pub os_type: gallery_image_properties::OsType,
#[serde(rename = "osState")]
pub os_state: gallery_image_properties::OsState,
#[serde(rename = "hyperVGeneration", default, skip_serializing_if = "Option::is_none")]
pub hyper_v_generation: Option<gallery_image_properties::HyperVGeneration>,
#[serde(rename = "endOfLifeDate", default, skip_serializing_if = "Option::is_none")]
pub end_of_life_date: Option<String>,
pub identifier: GalleryImageIdentifier,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub recommended: Option<RecommendedMachineConfiguration>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub disallowed: Option<Disallowed>,
#[serde(rename = "purchasePlan", default, skip_serializing_if = "Option::is_none")]
pub purchase_plan: Option<ImagePurchasePlan>,
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<gallery_image_properties::ProvisioningState>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub features: Vec<GalleryImageFeature>,
}
pub mod gallery_image_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum OsType {
Windows,
Linux,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum OsState {
Generalized,
Specialized,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum HyperVGeneration {
V1,
V2,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ProvisioningState {
Creating,
Updating,
Failed,
Succeeded,
Deleting,
Migrating,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GalleryImageFeature {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub value: Option<String>,
}
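/// Publisher/offer/SKU triple identifying a gallery image definition; all
/// three parts are required. A sketch using well-known marketplace values
/// (illustrative only):
///
/// ```ignore
/// let identifier = GalleryImageIdentifier {
///     publisher: "MicrosoftWindowsServer".to_string(),
///     offer: "WindowsServer".to_string(),
///     sku: "2019-Datacenter".to_string(),
/// };
/// ```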
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GalleryImageIdentifier {
pub publisher: String,
pub offer: String,
pub sku: String,
}
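/// Recommended vCPU and memory ranges for VMs created from the image. A
/// sketch (the bounds are illustrative):
///
/// ```ignore
/// let recommended = RecommendedMachineConfiguration {
///     v_cpus: Some(ResourceRange { min: Some(2), max: Some(8) }),
///     memory: Some(ResourceRange { min: Some(4), max: Some(32) }),
/// };
/// ```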
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RecommendedMachineConfiguration {
#[serde(rename = "vCPUs", default, skip_serializing_if = "Option::is_none")]
pub v_cpus: Option<ResourceRange>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub memory: Option<ResourceRange>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceRange {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub min: Option<i32>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub max: Option<i32>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Disallowed {
#[serde(rename = "diskTypes", default, skip_serializing_if = "Vec::is_empty")]
pub disk_types: Vec<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ImagePurchasePlan {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub publisher: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub product: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GalleryImageVersion {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<GalleryImageVersionProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GalleryImageVersionUpdate {
#[serde(flatten)]
pub update_resource_definition: UpdateResourceDefinition,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<GalleryImageVersionProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GalleryImageVersionProperties {
#[serde(rename = "publishingProfile", default, skip_serializing_if = "Option::is_none")]
pub publishing_profile: Option<GalleryImageVersionPublishingProfile>,
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<gallery_image_version_properties::ProvisioningState>,
#[serde(rename = "storageProfile")]
pub storage_profile: GalleryImageVersionStorageProfile,
#[serde(rename = "replicationStatus", default, skip_serializing_if = "Option::is_none")]
pub replication_status: Option<ReplicationStatus>,
}
pub mod gallery_image_version_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ProvisioningState {
Creating,
Updating,
Failed,
Succeeded,
Deleting,
Migrating,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GalleryArtifactPublishingProfileBase {
#[serde(rename = "targetRegions", default, skip_serializing_if = "Vec::is_empty")]
pub target_regions: Vec<TargetRegion>,
#[serde(rename = "replicaCount", default, skip_serializing_if = "Option::is_none")]
pub replica_count: Option<i32>,
#[serde(rename = "excludeFromLatest", default, skip_serializing_if = "Option::is_none")]
pub exclude_from_latest: Option<bool>,
#[serde(rename = "publishedDate", default, skip_serializing_if = "Option::is_none")]
pub published_date: Option<String>,
#[serde(rename = "endOfLifeDate", default, skip_serializing_if = "Option::is_none")]
pub end_of_life_date: Option<String>,
#[serde(rename = "storageAccountType", default, skip_serializing_if = "Option::is_none")]
pub storage_account_type: Option<gallery_artifact_publishing_profile_base::StorageAccountType>,
#[serde(rename = "replicationMode", default, skip_serializing_if = "Option::is_none")]
pub replication_mode: Option<gallery_artifact_publishing_profile_base::ReplicationMode>,
}
pub mod gallery_artifact_publishing_profile_base {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum StorageAccountType {
#[serde(rename = "Standard_LRS")]
StandardLrs,
#[serde(rename = "Standard_ZRS")]
StandardZrs,
#[serde(rename = "Premium_LRS")]
PremiumLrs,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ReplicationMode {
Full,
Shallow,
}
}
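/// Replication target for a gallery artifact; only `name` is required. A
/// sketch (region and counts are illustrative):
///
/// ```ignore
/// let region = TargetRegion {
///     name: "westus2".to_string(),
///     regional_replica_count: Some(2),
///     storage_account_type: Some(target_region::StorageAccountType::StandardZrs),
///     encryption: None,
/// };
/// ```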
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TargetRegion {
pub name: String,
#[serde(rename = "regionalReplicaCount", default, skip_serializing_if = "Option::is_none")]
pub regional_replica_count: Option<i32>,
#[serde(rename = "storageAccountType", default, skip_serializing_if = "Option::is_none")]
pub storage_account_type: Option<target_region::StorageAccountType>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub encryption: Option<EncryptionImages>,
}
pub mod target_region {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum StorageAccountType {
#[serde(rename = "Standard_LRS")]
StandardLrs,
#[serde(rename = "Standard_ZRS")]
StandardZrs,
#[serde(rename = "Premium_LRS")]
PremiumLrs,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EncryptionImages {
#[serde(rename = "osDiskImage", default, skip_serializing_if = "Option::is_none")]
pub os_disk_image: Option<OsDiskImageEncryption>,
#[serde(rename = "dataDiskImages", default, skip_serializing_if = "Vec::is_empty")]
pub data_disk_images: Vec<DataDiskImageEncryption>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OsDiskImageEncryption {
#[serde(flatten)]
pub disk_image_encryption: DiskImageEncryption,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataDiskImageEncryption {
#[serde(flatten)]
pub disk_image_encryption: DiskImageEncryption,
pub lun: i32,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DiskImageEncryption {
#[serde(rename = "diskEncryptionSetId", default, skip_serializing_if = "Option::is_none")]
pub disk_encryption_set_id: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GalleryArtifactSource {
#[serde(rename = "managedImage")]
pub managed_image: ManagedArtifact,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ManagedArtifact {
pub id: String,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GalleryImageVersionPublishingProfile {
#[serde(flatten)]
pub gallery_artifact_publishing_profile_base: GalleryArtifactPublishingProfileBase,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GalleryImageVersionStorageProfile {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub source: Option<GalleryArtifactVersionSource>,
#[serde(rename = "osDiskImage", default, skip_serializing_if = "Option::is_none")]
pub os_disk_image: Option<GalleryOsDiskImage>,
#[serde(rename = "dataDiskImages", default, skip_serializing_if = "Vec::is_empty")]
pub data_disk_images: Vec<GalleryDataDiskImage>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GalleryArtifactVersionSource {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub uri: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GalleryOsDiskImage {
#[serde(flatten)]
pub gallery_disk_image: GalleryDiskImage,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GalleryDataDiskImage {
#[serde(flatten)]
pub gallery_disk_image: GalleryDiskImage,
pub lun: i32,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GalleryDiskImage {
#[serde(rename = "sizeInGB", default, skip_serializing_if = "Option::is_none")]
pub size_in_gb: Option<i32>,
#[serde(rename = "hostCaching", default, skip_serializing_if = "Option::is_none")]
pub host_caching: Option<gallery_disk_image::HostCaching>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub source: Option<GalleryArtifactVersionSource>,
}
pub mod gallery_disk_image {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum HostCaching {
None,
ReadOnly,
ReadWrite,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ReplicationStatus {
#[serde(rename = "aggregatedState", default, skip_serializing_if = "Option::is_none")]
pub aggregated_state: Option<replication_status::AggregatedState>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub summary: Vec<RegionalReplicationStatus>,
}
pub mod replication_status {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum AggregatedState {
Unknown,
InProgress,
Completed,
Failed,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RegionalReplicationStatus {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub region: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub state: Option<regional_replication_status::State>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub details: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub progress: Option<i32>,
}
pub mod regional_replication_status {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum State {
Unknown,
Replicating,
Completed,
Failed,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GalleryList {
pub value: Vec<Gallery>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GalleryImageList {
pub value: Vec<GalleryImage>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GalleryImageVersionList {
pub value: Vec<GalleryImageVersion>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GalleryApplicationList {
pub value: Vec<GalleryApplication>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GalleryApplicationVersionList {
pub value: Vec<GalleryApplicationVersion>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UpdateResourceDefinition {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tags: Option<serde_json::Value>,
}
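/// Sharing-profile update for a gallery. A sketch that shares the gallery
/// with a hypothetical AAD tenant:
///
/// ```ignore
/// let update = SharingUpdate {
///     operation_type: sharing_update::OperationType::Add,
///     groups: vec![SharingProfileGroup {
///         type_: Some(sharing_profile_group::Type::AadTenants),
///         ids: vec!["<tenant-id>".to_string()],
///     }],
/// };
/// ```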
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SharingUpdate {
#[serde(rename = "operationType")]
pub operation_type: sharing_update::OperationType,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub groups: Vec<SharingProfileGroup>,
}
pub mod sharing_update {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum OperationType {
Add,
Remove,
Reset,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PirResource {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub location: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PirSharedGalleryResource {
#[serde(flatten)]
pub pir_resource: PirResource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub identifier: Option<SharedGalleryIdentifier>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SharedGalleryIdentifier {
#[serde(rename = "uniqueId", default, skip_serializing_if = "Option::is_none")]
pub unique_id: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SharedGalleryList {
pub value: Vec<SharedGallery>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SharedGallery {
#[serde(flatten)]
pub pir_shared_gallery_resource: PirSharedGalleryResource,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SharedGalleryImageList {
pub value: Vec<SharedGalleryImage>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SharedGalleryImage {
#[serde(flatten)]
pub pir_shared_gallery_resource: PirSharedGalleryResource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<SharedGalleryImageProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SharedGalleryImageProperties {
#[serde(rename = "osType")]
pub os_type: shared_gallery_image_properties::OsType,
#[serde(rename = "osState")]
pub os_state: shared_gallery_image_properties::OsState,
#[serde(rename = "endOfLifeDate", default, skip_serializing_if = "Option::is_none")]
pub end_of_life_date: Option<String>,
pub identifier: GalleryImageIdentifier,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub recommended: Option<RecommendedMachineConfiguration>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub disallowed: Option<Disallowed>,
#[serde(rename = "hyperVGeneration", default, skip_serializing_if = "Option::is_none")]
pub hyper_v_generation: Option<shared_gallery_image_properties::HyperVGeneration>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub features: Vec<GalleryImageFeature>,
#[serde(rename = "purchasePlan", default, skip_serializing_if = "Option::is_none")]
pub purchase_plan: Option<ImagePurchasePlan>,
}
pub mod shared_gallery_image_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum OsType {
Windows,
Linux,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum OsState {
Generalized,
Specialized,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum HyperVGeneration {
V1,
V2,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SharedGalleryImageVersionList {
pub value: Vec<SharedGalleryImageVersion>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SharedGalleryImageVersion {
#[serde(flatten)]
pub pir_shared_gallery_resource: PirSharedGalleryResource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<SharedGalleryImageVersionProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SharedGalleryImageVersionProperties {
#[serde(rename = "publishedDate", default, skip_serializing_if = "Option::is_none")]
pub published_date: Option<String>,
#[serde(rename = "endOfLifeDate", default, skip_serializing_if = "Option::is_none")]
pub end_of_life_date: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PirCommunityGalleryResource {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub location: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub identifier: Option<CommunityGalleryIdentifier>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CommunityGalleryIdentifier {
#[serde(rename = "uniqueId", default, skip_serializing_if = "Option::is_none")]
pub unique_id: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CommunityGallery {
#[serde(flatten)]
pub pir_community_gallery_resource: PirCommunityGalleryResource,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CommunityGalleryImage {
#[serde(flatten)]
pub pir_community_gallery_resource: PirCommunityGalleryResource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<CommunityGalleryImageProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CommunityGalleryImageProperties {
#[serde(rename = "osType")]
pub os_type: community_gallery_image_properties::OsType,
#[serde(rename = "osState")]
pub os_state: community_gallery_image_properties::OsState,
#[serde(rename = "endOfLifeDate", default, skip_serializing_if = "Option::is_none")]
pub end_of_life_date: Option<String>,
pub identifier: GalleryImageIdentifier,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub recommended: Option<RecommendedMachineConfiguration>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub disallowed: Option<Disallowed>,
#[serde(rename = "hyperVGeneration", default, skip_serializing_if = "Option::is_none")]
pub hyper_v_generation: Option<community_gallery_image_properties::HyperVGeneration>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub features: Vec<GalleryImageFeature>,
#[serde(rename = "purchasePlan", default, skip_serializing_if = "Option::is_none")]
pub purchase_plan: Option<ImagePurchasePlan>,
}
pub mod community_gallery_image_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum OsType {
Windows,
Linux,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum OsState {
Generalized,
Specialized,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum HyperVGeneration {
V1,
V2,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CommunityGalleryImageVersion {
#[serde(flatten)]
pub pir_community_gallery_resource: PirCommunityGalleryResource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<CommunityGalleryImageVersionProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CommunityGalleryImageVersionProperties {
#[serde(rename = "publishedDate", default, skip_serializing_if = "Option::is_none")]
pub published_date: Option<String>,
#[serde(rename = "endOfLifeDate", default, skip_serializing_if = "Option::is_none")]
pub end_of_life_date: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct InstanceSku {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tier: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RoleInstanceNetworkProfile {
#[serde(rename = "networkInterfaces", default, skip_serializing_if = "Vec::is_empty")]
pub network_interfaces: Vec<SubResource>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceInstanceViewStatus {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub code: Option<String>,
#[serde(rename = "displayStatus", default, skip_serializing_if = "Option::is_none")]
pub display_status: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub message: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub time: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub level: Option<resource_instance_view_status::Level>,
}
pub mod resource_instance_view_status {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Level {
Info,
Warning,
Error,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RoleInstanceInstanceView {
#[serde(rename = "platformUpdateDomain", default, skip_serializing_if = "Option::is_none")]
pub platform_update_domain: Option<i32>,
#[serde(rename = "platformFaultDomain", default, skip_serializing_if = "Option::is_none")]
pub platform_fault_domain: Option<i32>,
#[serde(rename = "privateId", default, skip_serializing_if = "Option::is_none")]
pub private_id: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub statuses: Vec<ResourceInstanceViewStatus>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RoleInstanceProperties {
#[serde(rename = "networkProfile", default, skip_serializing_if = "Option::is_none")]
pub network_profile: Option<RoleInstanceNetworkProfile>,
#[serde(rename = "instanceView", default, skip_serializing_if = "Option::is_none")]
pub instance_view: Option<RoleInstanceInstanceView>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RoleInstance {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub location: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tags: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub sku: Option<InstanceSku>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<RoleInstanceProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RoleInstanceListResult {
pub value: Vec<RoleInstance>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudServiceRoleSku {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tier: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub capacity: Option<i64>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudServiceRoleProperties {
#[serde(rename = "uniqueId", default, skip_serializing_if = "Option::is_none")]
pub unique_id: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudServiceRole {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub location: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub sku: Option<CloudServiceRoleSku>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<CloudServiceRoleProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudServiceRoleListResult {
pub value: Vec<CloudServiceRole>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum CloudServiceUpgradeMode {
Auto,
Manual,
Simultaneous,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudServiceRoleProfileProperties {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub sku: Option<CloudServiceRoleSku>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudServiceRoleProfile {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub roles: Vec<CloudServiceRoleProfileProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct | {
#[serde(rename = "certificateUrl", default, skip_serializing_if = "Option::is_none")]
pub certificate_url: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudServiceVaultSecretGroup {
#[serde(rename = "sourceVault", default, skip_serializing_if = "Option::is_none")]
pub source_vault: Option<SubResource>,
#[serde(rename = "vaultCertificates", default, skip_serializing_if = "Vec::is_empty")]
pub vault_certificates: Vec<CloudServiceVaultCertificate>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudServiceOsProfile {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub secrets: Vec<CloudServiceVaultSecretGroup>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LoadBalancerFrontendIpConfigurationProperties {
#[serde(rename = "publicIPAddress", default, skip_serializing_if = "Option::is_none")]
pub public_ip_address: Option<SubResource>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub subnet: Option<SubResource>,
#[serde(rename = "privateIPAddress", default, skip_serializing_if = "Option::is_none")]
pub private_ip_address: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LoadBalancerFrontendIpConfiguration {
pub name: String,
pub properties: LoadBalancerFrontendIpConfigurationProperties,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LoadBalancerConfigurationProperties {
#[serde(rename = "frontendIPConfigurations")]
pub frontend_ip_configurations: Vec<LoadBalancerFrontendIpConfiguration>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LoadBalancerConfiguration {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
pub name: String,
pub properties: LoadBalancerConfigurationProperties,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudServiceNetworkProfile {
#[serde(rename = "loadBalancerConfigurations", default, skip_serializing_if = "Vec::is_empty")]
pub load_balancer_configurations: Vec<LoadBalancerConfiguration>,
#[serde(rename = "swappableCloudService", default, skip_serializing_if = "Option::is_none")]
pub swappable_cloud_service: Option<SubResource>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudServiceVaultAndSecretReference {
#[serde(rename = "sourceVault", default, skip_serializing_if = "Option::is_none")]
pub source_vault: Option<SubResource>,
#[serde(rename = "secretUrl", default, skip_serializing_if = "Option::is_none")]
pub secret_url: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudServiceExtensionProperties {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub publisher: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(rename = "typeHandlerVersion", default, skip_serializing_if = "Option::is_none")]
pub type_handler_version: Option<String>,
#[serde(rename = "autoUpgradeMinorVersion", default, skip_serializing_if = "Option::is_none")]
pub auto_upgrade_minor_version: Option<bool>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub settings: Option<String>,
#[serde(rename = "protectedSettings", default, skip_serializing_if = "Option::is_none")]
pub protected_settings: Option<String>,
#[serde(rename = "protectedSettingsFromKeyVault", default, skip_serializing_if = "Option::is_none")]
pub protected_settings_from_key_vault: Option<CloudServiceVaultAndSecretReference>,
#[serde(rename = "forceUpdateTag", default, skip_serializing_if = "Option::is_none")]
pub force_update_tag: Option<String>,
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<String>,
#[serde(rename = "rolesAppliedTo", default, skip_serializing_if = "Vec::is_empty")]
pub roles_applied_to: Vec<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Extension {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<CloudServiceExtensionProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudServiceExtensionProfile {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub extensions: Vec<Extension>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudServiceProperties {
#[serde(rename = "packageUrl", default, skip_serializing_if = "Option::is_none")]
pub package_url: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub configuration: Option<String>,
#[serde(rename = "configurationUrl", default, skip_serializing_if = "Option::is_none")]
pub configuration_url: Option<String>,
#[serde(rename = "startCloudService", default, skip_serializing_if = "Option::is_none")]
pub start_cloud_service: Option<bool>,
#[serde(rename = "allowModelOverride", default, skip_serializing_if = "Option::is_none")]
pub allow_model_override: Option<bool>,
#[serde(rename = "upgradeMode", default, skip_serializing_if = "Option::is_none")]
pub upgrade_mode: Option<CloudServiceUpgradeMode>,
#[serde(rename = "roleProfile", default, skip_serializing_if = "Option::is_none")]
pub role_profile: Option<CloudServiceRoleProfile>,
#[serde(rename = "osProfile", default, skip_serializing_if = "Option::is_none")]
pub os_profile: Option<CloudServiceOsProfile>,
#[serde(rename = "networkProfile", default, skip_serializing_if = "Option::is_none")]
pub network_profile: Option<CloudServiceNetworkProfile>,
#[serde(rename = "extensionProfile", default, skip_serializing_if = "Option::is_none")]
pub extension_profile: Option<CloudServiceExtensionProfile>,
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<String>,
#[serde(rename = "uniqueId", default, skip_serializing_if = "Option::is_none")]
pub unique_id: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudService {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
pub location: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tags: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<CloudServiceProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudServiceUpdate {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tags: Option<serde_json::Value>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct StatusCodeCount {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub code: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub count: Option<i32>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct InstanceViewStatusesSummary {
#[serde(rename = "statusesSummary", default, skip_serializing_if = "Vec::is_empty")]
pub statuses_summary: Vec<StatusCodeCount>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudServiceInstanceView {
#[serde(rename = "roleInstance", default, skip_serializing_if = "Option::is_none")]
pub role_instance: Option<InstanceViewStatusesSummary>,
#[serde(rename = "sdkVersion", default, skip_serializing_if = "Option::is_none")]
pub sdk_version: Option<String>,
#[serde(rename = "privateIds", default, skip_serializing_if = "Vec::is_empty")]
pub private_ids: Vec<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub statuses: Vec<ResourceInstanceViewStatus>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CloudServiceListResult {
pub value: Vec<CloudService>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RoleInstances {
#[serde(rename = "roleInstances")]
pub role_instances: Vec<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UpdateDomain {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UpdateDomainListResult {
pub value: Vec<UpdateDomain>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OsVersionProperties {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub family: Option<String>,
#[serde(rename = "familyLabel", default, skip_serializing_if = "Option::is_none")]
pub family_label: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub version: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub label: Option<String>,
#[serde(rename = "isDefault", default, skip_serializing_if = "Option::is_none")]
pub is_default: Option<bool>,
#[serde(rename = "isActive", default, skip_serializing_if = "Option::is_none")]
pub is_active: Option<bool>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OsVersion {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub location: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<OsVersionProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OsVersionListResult {
pub value: Vec<OsVersion>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OsVersionPropertiesBase {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub version: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub label: Option<String>,
#[serde(rename = "isDefault", default, skip_serializing_if = "Option::is_none")]
pub is_default: Option<bool>,
#[serde(rename = "isActive", default, skip_serializing_if = "Option::is_none")]
pub is_active: Option<bool>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OsFamilyProperties {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub label: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub versions: Vec<OsVersionPropertiesBase>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OsFamily {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub location: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<OsFamilyProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OsFamilyListResult {
pub value: Vec<OsFamily>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
| CloudServiceVaultCertificate |
print.rs | fn | () {
println!("{}", 2);
println!("{}", "b");
println!("{} {}", 2, "b");
}
| main |
rest.go | package imagestreamimage
import (
kapi "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/runtime"
"github.com/openshift/origin/pkg/image/api"
"github.com/openshift/origin/pkg/image/registry/image"
"github.com/openshift/origin/pkg/image/registry/imagestream"
)
// REST implements the RESTStorage interface in terms of an image registry and
// image stream registry. It only supports the Get method and is used
// to retrieve an image by id, scoped to an ImageStream. REST ensures
// that the requested image belongs to the specified ImageStream.
type REST struct {
imageRegistry image.Registry
imageStreamRegistry imagestream.Registry
}
// NewREST returns a new REST.
func NewREST(imageRegistry image.Registry, imageStreamRegistry imagestream.Registry) *REST {
return &REST{imageRegistry, imageStreamRegistry}
}
// New is only implemented to make REST implement RESTStorage
func (r *REST) New() runtime.Object {
return &api.ImageStreamImage{}
}
// parseNameAndID splits a string into its name component and ID component, and returns an error
// if the string is not in the right form.
func parseNameAndID(input string) (name string, id string, err error) |
// Get retrieves an image by ID that has previously been tagged into an image stream.
// `id` is of the form <repo name>@<image id>.
func (r *REST) Get(ctx kapi.Context, id string) (runtime.Object, error) {
name, imageID, err := parseNameAndID(id)
if err != nil {
return nil, err
}
repo, err := r.imageStreamRegistry.GetImageStream(ctx, name)
if err != nil {
return nil, err
}
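	// A stream with no status tags has never recorded an image, so nothing can match.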
if repo.Status.Tags == nil {
return nil, errors.NewNotFound(api.Resource("imagestreamimage"), id)
}
event, err := api.ResolveImageID(repo, imageID)
if err != nil {
return nil, err
}
imageName := event.Image
image, err := r.imageRegistry.GetImage(ctx, imageName)
if err != nil {
return nil, err
}
if err := api.ImageWithMetadata(image); err != nil {
return nil, err
}
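	// Clear the raw manifest; it is bulky and not needed in the returned object.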
image.DockerImageManifest = ""
isi := api.ImageStreamImage{
ObjectMeta: kapi.ObjectMeta{
Namespace: kapi.NamespaceValue(ctx),
Name: api.MakeImageStreamImageName(name, imageID),
CreationTimestamp: image.ObjectMeta.CreationTimestamp,
},
Image: *image,
}
return &isi, nil
}
| {
name, id, err = api.ParseImageStreamImageName(input)
if err != nil {
err = errors.NewBadRequest("ImageStreamImages must be retrieved with <name>@<id>")
}
return
} |
main.ts | import { ValidationPipe } from '@nestjs/common';
import { NestFactory } from '@nestjs/core';
import { json, urlencoded } from 'express';
import { AppModule } from './app.module';
import { SwaggerModule, DocumentBuilder } from '@nestjs/swagger';
import * as cookieParser from 'cookie-parser';
import { LoggingService } from './modules/logging/logging.service';
async function bootstrap() {
const app = await NestFactory.create(AppModule, { cors: false });
app.useGlobalPipes(new ValidationPipe({ whitelist: true, transform: true }));
app.setGlobalPrefix('/v1');
app.use(json({ limit: '20mb' }));
app.use(urlencoded({ limit: '20mb', extended: true }));
app.use(cookieParser());
app.useLogger(app.get(LoggingService));
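  // Allow cross-origin requests from any origin, with credentials (cookies) enabled.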
app.enableCors({
credentials: true,
origin: true
}); | const config = new DocumentBuilder()
    .setTitle('NestJS skeleton')
    .setDescription('Project skeleton')
.setVersion('1.0')
.build();
const document = SwaggerModule.createDocument(app, config);
SwaggerModule.setup('api', app, document);
await app.listen(3000);
}
bootstrap(); | |
index.js | import React from "react"
import { Link, graphql } from "gatsby"
import Layout from "../../components/layout"
import SEO from "../../components/seo"
import styled from "styled-components"
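// Responsive flex container for the three sponsor tiers.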
const SponsorGrid = styled.div`
display: flex;
width: 100%;
justify-content: space-around;
flex-wrap: wrap;
`
const Level = styled.div`
width: 30%;
@media (max-width: 700px) {
width: 100%;
}
padding: 10px 10px 50px 10px;
border-radius: 5px;
border: 1px solid #eee;
position: relative;
h1 {
font-size: 20px;
margin-top: 0;
text-align: center;
}
a {
position: absolute;
bottom: 0;
left: 0;
right: 0;
margin: 10px;
padding: 10px;
text-align: center;
background: #000;
color: #fff;
font-weight: 800;
border-radius: 5px;
&:hover {
background: #eee;
color: #000;
}
}
`
class | extends React.Component {
render() {
const { data } = this.props
const title = data.site.siteMetadata.title
return (
<Layout location={this.props.location} title={title}>
<SEO title="Sponsor a Newsletter" />
<h1>Sponsor {title}</h1>
<p>
{title} is read by hundreds of designers and foreign policy
professionals around the world. Support us and get your message out by
sponsoring a weekly feature, booking a classified ad, or featuring a
job posting.
</p>
<SponsorGrid>
<Level>
<h1>Sponsor an Issue</h1>
<p>
                Issue sponsors are posted at the top, right below our
                introduction. They get a highlighted treatment, more text per
newsletter, and an image. You make the magic happen!
</p>
<Link to={`/sponsor/issue`}>Sponsor an Issue</Link>
</Level>
<Level>
<h1>Share a Job Post</h1>
<p>
We share relevant job posts with our community because we want
great people filling them. Featured job posts are highlighted and
contain more information.
</p>
<Link to={`/sponsor/job`}>Post a Job</Link>
</Level>
<Level>
<h1>Post an Ad</h1>
<p>
Ads are small posts at the bottom of the newsletter. We feature a
maximum of 4 of these small ads per newsletter.
</p>
<Link to={`/sponsor/ad`}>Post Ad</Link>
</Level>
</SponsorGrid>
<h2>Terms and Conditions</h2>
<p>
Basically, we reserve the right to reject ads on content grounds.
You'll be refunded minus any processing fees if that's the case.
Unless the content is clearly hateful or discriminatory, we'll reach
out first to discuss.
</p>
</Layout>
)
}
}
export default Sponsor
export const pageQuery = graphql`
query {
site {
siteMetadata {
title
subtitle
}
}
}
`
| Sponsor |
xplmi__task_8h.js | [ "TaskPriority_t", "xplmi__task_8h.html#a4904bbe8466b9be9707082098e502b14", [
[ "XPLM_TASK_PRIORITY_0", "xplmi__task_8h.html#a4904bbe8466b9be9707082098e502b14ad421ffdc12eef2b11396e31dc51d728e", null ],
[ "XPLM_TASK_PRIORITY_1", "xplmi__task_8h.html#a4904bbe8466b9be9707082098e502b14a64b22a16e6ff1489672b607bfc64d897", null ]
] ],
[ "XPlmi_TaskCreate", "xplmi__task_8h.html#a9a408541546aa1b8c599ce75c1f5c7d9", null ],
[ "XPlmi_TaskDispatchLoop", "xplmi__task_8h.html#ac417102cdb74ac7c522ce119729870ff", null ],
[ "XPlmi_TaskInit", "xplmi__task_8h.html#a9a8fa5b2fda2c5639ec63f6a75dc54a9", null ],
[ "XPlmi_TaskTriggerNow", "xplmi__task_8h.html#adbfc56f5e2b34a2bd07996a3231becb2", null ]
]; | var xplmi__task_8h =
[ |
|
primitive_types3.rs | // primitive_types3.rs
// Create an array with at least 100 elements in it where the ??? is.
// Execute `rustlings hint primitive_types3` for hints!
fn | () {
let a = 1..150;
if a.len() >= 100 {
println!("Wow, that's a big array!");
} else {
println!("Meh, I eat arrays like that for breakfast.");
}
}
| main |
account.go | package client
// Done1
// Get Account Details
func Account_get_details() |
// The 'Authenticate' Call
func Authenticate_get() {
// GET /authenticate.json
}
| {
// GET /account.json
} |
models.py | from datetime import datetime
import re
from urllib.parse import quote
from django.db import models
from django.utils.html import urlize
from django.utils.timezone import make_aware, utc
from django.utils.translation import ugettext_lazy as _
from requests_oauthlib import OAuth1
import requests
from mezzanine.conf import settings
from mezzanine.twitter import (
QUERY_TYPE_CHOICES,
QUERY_TYPE_USER,
QUERY_TYPE_LIST,
QUERY_TYPE_SEARCH,
)
from mezzanine.twitter import get_auth_settings
from mezzanine.twitter.managers import TweetManager
re_usernames = re.compile(r"(^|\W)@([0-9a-zA-Z+_]+)", re.IGNORECASE)
re_hashtags = re.compile(r"#([0-9a-zA-Z+_]+)", re.IGNORECASE)
replace_hashtags = '<a href="http://twitter.com/search?q=%23\\1">#\\1</a>'
replace_usernames = '\\1<a href="http://twitter.com/\\2">@\\2</a>'
class TwitterQueryException(Exception):
pass
class Query(models.Model):
type = models.CharField(_("Type"), choices=QUERY_TYPE_CHOICES, max_length=10)
value = models.CharField(_("Value"), max_length=140)
interested = models.BooleanField("Interested", default=True)
class Meta:
verbose_name = _("Twitter query")
verbose_name_plural = _("Twitter queries")
ordering = ("-id",)
def __str__(self):
return "%s: %s" % (self.get_type_display(), self.value)
def run(self):
"""
Request new tweets from the Twitter API.
"""
try:
value = quote(self.value)
except KeyError:
value = self.value
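        # Twitter API v1.1 endpoint templates, keyed by query type.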
urls = {
QUERY_TYPE_USER: (
"https://api.twitter.com/1.1/statuses/"
"user_timeline.json?screen_name=%s"
"&include_rts=true" % value.lstrip("@")
),
QUERY_TYPE_LIST: (
"https://api.twitter.com/1.1/lists/statuses.json"
"?list_id=%s&include_rts=true" % value
),
QUERY_TYPE_SEARCH: "https://api.twitter.com/1.1/search/tweets.json"
"?q=%s" % value,
}
try:
url = urls[self.type]
except KeyError:
raise TwitterQueryException("Invalid query type: %s" % self.type)
auth_settings = get_auth_settings()
if not auth_settings:
from mezzanine.conf import registry
if self.value == registry["TWITTER_DEFAULT_QUERY"]["default"]:
# These are some read-only keys and secrets we use
# for the default query (eg nothing has been configured)
auth_settings = (
"KxZTRD3OBft4PP0iQW0aNQ",
"sXpQRSDUVJ2AVPZTfh6MrJjHfOGcdK4wRb1WTGQ",
"1368725588-ldWCsd54AJpG2xcB5nyTHyCeIC3RJcNVUAkB1OI",
"r9u7qS18t8ad4Hu9XVqmCGxlIpzoCN3e1vx6LOSVgyw3R",
)
else:
raise TwitterQueryException("Twitter OAuth settings missing")
try:
tweets = requests.get(url, auth=OAuth1(*auth_settings)).json()
except Exception as e:
raise TwitterQueryException("Error retrieving: %s" % e)
try:
raise TwitterQueryException(tweets["errors"][0]["message"])
except (IndexError, KeyError, TypeError):
pass
if self.type == "search":
tweets = tweets["statuses"]
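        # Create records only for tweets we haven't stored yet.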
for tweet_json in tweets:
remote_id = str(tweet_json["id"])
tweet, created = self.tweets.get_or_create(remote_id=remote_id)
if not created:
continue
if "retweeted_status" in tweet_json:
user = tweet_json["user"]
tweet.retweeter_user_name = user["screen_name"]
tweet.retweeter_full_name = user["name"]
tweet.retweeter_profile_image_url = user["profile_image_url"]
tweet_json = tweet_json["retweeted_status"]
            user = tweet_json["user"]
            tweet.user_name = user["screen_name"]
            tweet.full_name = user["name"]
            tweet.profile_image_url = user["profile_image_url"]
            date_format = "%a %b %d %H:%M:%S +0000 %Y"
tweet.text = urlize(tweet_json["text"])
tweet.text = re_usernames.sub(replace_usernames, tweet.text)
tweet.text = re_hashtags.sub(replace_hashtags, tweet.text)
if getattr(settings, "TWITTER_STRIP_HIGH_MULTIBYTE", False):
chars = [ch for ch in tweet.text if ord(ch) < 0x800]
tweet.text = "".join(chars)
d = datetime.strptime(tweet_json["created_at"], date_format)
tweet.created_at = make_aware(d, utc)
try:
tweet.save()
except Warning:
pass
tweet.save()
self.interested = False
self.save()
class Tweet(models.Model):
remote_id = models.CharField(_("Twitter ID"), max_length=50)
created_at = models.DateTimeField(_("Date/time"), null=True)
text = models.TextField(_("Message"), null=True)
profile_image_url = models.URLField(_("Profile image URL"), null=True)
user_name = models.CharField(_("User name"), max_length=100, null=True)
full_name = models.CharField(_("Full name"), max_length=100, null=True)
retweeter_profile_image_url = models.URLField(
_("Profile image URL (Retweeted by)"), null=True
)
retweeter_user_name = models.CharField(
_("User name (Retweeted by)"), max_length=100, null=True
)
retweeter_full_name = models.CharField(
_("Full name (Retweeted by)"), max_length=100, null=True
)
query = models.ForeignKey("Query", on_delete=models.CASCADE, related_name="tweets")
objects = TweetManager()
class Meta:
verbose_name = _("Tweet")
verbose_name_plural = _("Tweets")
ordering = ("-created_at",)
def | (self):
return "%s: %s" % (self.user_name, self.text)
def is_retweet(self):
return self.retweeter_user_name is not None
| __str__ |
lib.rs | #![doc(html_root_url = "https://docs.rs/linurgy/0.4.0")]
//! `linurgy` provides an interface for manipulating multiple newlines in text.
//! Interaction with this library happens through
//! [`LinurgyBuilder`](struct.LinurgyBuilder.html).
//!
//! # Examples | //! # use std::error::Error;
//! # use linurgy::LinurgyBuilder;
//! # fn main() -> Result<(), Box<dyn Error>> {
//! LinurgyBuilder::new()
//! .set_newline_trigger(2)
//! .set_new_text("\n")
//! .run()?;
//! #
//! # Ok(())
//! # }
//! ```
//!
//! Read from one buffer, remove all empty lines, and output to another buffer.
//! ```rust
//! # use std::error::Error;
//! # use linurgy::{LinurgyBuilder, Input, Output, EditType};
//! # fn main() -> Result<(), Box<dyn Error>> {
//! let input = String::from("Remove\n\nEvery\n\nEmpty\n\nLine\n");
//! let mut output = String::new();
//!
//! LinurgyBuilder::new()
//! .set_input(Input::Buffer(&input))
//! .set_output(Output::Buffer(&mut output))
//! .set_newline_trigger(2)
//! .set_edit_type(EditType::Replace)
//! .set_new_text("\n")
//! .run();
//!
//! assert_eq!("Remove\nEvery\nEmpty\nLine\n", &output);
//! #
//! # Ok(())
//! # }
//! ```
mod editor;
pub mod factory;
mod legacy;
pub use editor::{Editor, NewlineType};
pub use factory::EditType;
pub use legacy::*; | //!
//! Read stdin and for each empty line, append an extra line to stdout.
//! ```rust |
processor.rs | //! Program state processor
use solana_program::program_option::COption;
use crate::instruction::StatelessOfferInstruction;
use crate::validation_utils::{assert_is_ata, assert_keys_equal};
use {
borsh::BorshDeserialize,
solana_program::{
account_info::next_account_info,
account_info::AccountInfo,
entrypoint::ProgramResult,
msg,
program::{invoke, invoke_signed},
program_error::ProgramError,
program_pack::Pack,
pubkey::Pubkey,
system_instruction, system_program,
},
};
/// Program state handler.
pub struct Processor {}
impl Processor {
/// Processes [Instruction](enum.Instruction.html).
pub fn process(program_id: &Pubkey, accounts: &[AccountInfo], input: &[u8]) -> ProgramResult {
let instruction = StatelessOfferInstruction::try_from_slice(input)?;
match instruction {
StatelessOfferInstruction::AcceptOffer {
maker_size,
taker_size,
bump_seed,
} => {
msg!("Instruction: accept offer");
process_accept_offer(program_id, accounts, maker_size, taker_size, bump_seed)
}
}
}
}
fn process_accept_offer(
program_id: &Pubkey,
accounts: &[AccountInfo],
maker_size: u64,
taker_size: u64,
bump_seed: u8,
) -> ProgramResult {
let account_info_iter = &mut accounts.iter();
let maker_wallet = next_account_info(account_info_iter)?;
let taker_wallet = next_account_info(account_info_iter)?;
let maker_src_account = next_account_info(account_info_iter)?;
let maker_dst_account = next_account_info(account_info_iter)?;
let taker_src_account = next_account_info(account_info_iter)?;
let taker_dst_account = next_account_info(account_info_iter)?;
let maker_src_mint = next_account_info(account_info_iter)?;
let taker_src_mint = next_account_info(account_info_iter)?;
let transfer_authority = next_account_info(account_info_iter)?;
let token_program_info = next_account_info(account_info_iter)?;
let maker_src_token_account: spl_token::state::Account =
spl_token::state::Account::unpack(&maker_src_account.data.borrow())?; | // Ensure that the delegated amount is exactly equal to the maker_size
msg!(
"Delegate {}",
maker_src_token_account
.delegate
.unwrap_or(*maker_wallet.key)
);
msg!(
"Delegated Amount {}",
maker_src_token_account.delegated_amount
);
if maker_src_token_account.delegated_amount != maker_size {
return Err(ProgramError::InvalidAccountData);
}
msg!("Delegated Amount matches");
let seeds = &[
b"stateless_offer",
maker_src_account.key.as_ref(),
maker_dst_account.key.as_ref(),
taker_src_mint.key.as_ref(),
&maker_size.to_le_bytes(),
&taker_size.to_le_bytes(),
&[bump_seed],
];
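    // Recompute the offer PDA from its seeds; it must match the supplied transfer authority.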
let authority_key = Pubkey::create_program_address(seeds, program_id).unwrap();
assert_keys_equal(authority_key, *transfer_authority.key)?;
    msg!("Authority key matches");
    // Ensure that authority is the delegate of this token account
if maker_src_token_account.delegate != COption::Some(authority_key) {
return Err(ProgramError::InvalidAccountData);
}
msg!("Delegate matches");
assert_keys_equal(spl_token::id(), *token_program_info.key)?;
msg!("start");
    // Both of these transfers will fail if the `transfer_authority` is not the delegate of these ATAs
// One consideration is that the taker can get tricked in the case that the maker size is greater than
// the token amount in the maker's ATA, but these stateless offers should just be invalidated in
// the client.
assert_is_ata(maker_src_account, maker_wallet.key, maker_src_mint.key)?;
assert_is_ata(taker_dst_account, taker_wallet.key, maker_src_mint.key)?;
msg!(
"Transferring {} from {} to {}",
maker_src_mint.key,
maker_wallet.key,
taker_wallet.key
);
invoke_signed(
&spl_token::instruction::transfer(
token_program_info.key,
maker_src_account.key,
taker_dst_account.key,
transfer_authority.key,
&[],
maker_size,
)?,
&[
maker_src_account.clone(),
taker_dst_account.clone(),
transfer_authority.clone(),
token_program_info.clone(),
],
&[seeds],
)?;
msg!("done tx from maker to taker {}", maker_size);
if *taker_src_mint.key == spl_token::native_mint::id() {
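        // The taker pays in native SOL, so move lamports directly via the system program.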
msg!(
"Transferring lamports from {} to {}",
taker_wallet.key,
maker_wallet.key
);
assert_keys_equal(*taker_wallet.key, *taker_src_account.key)?;
assert_keys_equal(*maker_wallet.key, *maker_dst_account.key)?;
let system_program_info = next_account_info(account_info_iter)?;
assert_keys_equal(system_program::id(), *system_program_info.key)?;
invoke(
&system_instruction::transfer(taker_src_account.key, maker_dst_account.key, taker_size),
&[
taker_src_account.clone(),
maker_dst_account.clone(),
system_program_info.clone(),
],
)?;
} else {
assert_is_ata(maker_dst_account, maker_wallet.key, taker_src_mint.key)?;
assert_is_ata(taker_src_account, taker_wallet.key, taker_src_mint.key)?;
msg!(
"Transferring {} from {} to {}",
taker_src_mint.key,
taker_wallet.key,
maker_wallet.key
);
invoke(
&spl_token::instruction::transfer(
token_program_info.key,
taker_src_account.key,
maker_dst_account.key,
taker_wallet.key,
&[],
taker_size,
)?,
&[
taker_src_account.clone(),
maker_dst_account.clone(),
taker_wallet.clone(),
token_program_info.clone(),
],
)?;
}
msg!("done tx from taker to maker {}", taker_size);
msg!("done!");
Ok(())
} | msg!("Processed Accounts"); |
import_price_list.py | import frappe
def execute():
| path = frappe.get_app_path("niyopolymers", "patches", "imports", "price_list.csv")
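	# Insert-only import of the bundled CSV, with progress logged to the console.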
frappe.core.doctype.data_import.data_import.import_file("Price List", path, "Insert", console=True) |
|
hash.rs | //! The `hash` module provides functions for creating SHA-256 hashes.
use crate::sanitize::Sanitize;
use borsh::{BorshDeserialize, BorshSchema, BorshSerialize};
use sha2::{Digest, Sha256};
use std::{convert::TryFrom, fmt, mem, str::FromStr};
use thiserror::Error;
pub const HASH_BYTES: usize = 32;
/// Maximum string length of a base58 encoded hash
const MAX_BASE58_LEN: usize = 44;
#[derive(
Serialize,
Deserialize,
BorshSerialize,
BorshDeserialize,
BorshSchema,
Clone,
Copy,
Default,
Eq,
PartialEq,
Ord,
PartialOrd,
Hash,
AbiExample,
)]
#[repr(transparent)]
pub struct Hash(pub(crate) [u8; HASH_BYTES]);
#[derive(Clone, Default)]
pub struct Hasher {
hasher: Sha256,
}
impl Hasher {
pub fn hash(&mut self, val: &[u8]) {
self.hasher.update(val);
}
pub fn hashv(&mut self, vals: &[&[u8]]) {
for val in vals {
self.hash(val);
}
}
pub fn result(self) -> Hash |
}
impl Sanitize for Hash {}
impl AsRef<[u8]> for Hash {
fn as_ref(&self) -> &[u8] {
&self.0[..]
}
}
impl fmt::Debug for Hash {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", bs58::encode(self.0).into_string())
}
}
impl fmt::Display for Hash {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", bs58::encode(self.0).into_string())
}
}
#[derive(Debug, Clone, PartialEq, Eq, Error)]
pub enum ParseHashError {
#[error("string decoded to wrong size for hash")]
WrongSize,
#[error("failed to decoded string to hash")]
Invalid,
}
impl FromStr for Hash {
type Err = ParseHashError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
if s.len() > MAX_BASE58_LEN {
return Err(ParseHashError::WrongSize);
}
let bytes = bs58::decode(s)
.into_vec()
.map_err(|_| ParseHashError::Invalid)?;
if bytes.len() != mem::size_of::<Hash>() {
Err(ParseHashError::WrongSize)
} else {
Ok(Hash::new(&bytes))
}
}
}
impl Hash {
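    /// Creates a `Hash` from a byte slice; panics if the slice is not exactly HASH_BYTES long.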
pub fn new(hash_slice: &[u8]) -> Self {
Hash(<[u8; HASH_BYTES]>::try_from(hash_slice).unwrap())
}
pub const fn new_from_array(hash_array: [u8; HASH_BYTES]) -> Self {
Self(hash_array)
}
/// unique Hash for tests and benchmarks.
pub fn new_unique() -> Self {
use std::sync::atomic::{AtomicU64, Ordering};
static I: AtomicU64 = AtomicU64::new(1);
let mut b = [0u8; HASH_BYTES];
let i = I.fetch_add(1, Ordering::Relaxed);
b[0..8].copy_from_slice(&i.to_le_bytes());
Self::new(&b)
}
pub fn to_bytes(self) -> [u8; HASH_BYTES] {
self.0
}
}
/// Return a Sha256 hash for the given data.
pub fn hashv(vals: &[&[u8]]) -> Hash {
// Perform the calculation inline, calling this from within a program is
// not supported
#[cfg(not(target_arch = "bpf"))]
{
let mut hasher = Hasher::default();
hasher.hashv(vals);
hasher.result()
}
// Call via a system call to perform the calculation
#[cfg(target_arch = "bpf")]
{
extern "C" {
fn sol_sha256(vals: *const u8, val_len: u64, hash_result: *mut u8) -> u64;
}
let mut hash_result = [0; HASH_BYTES];
unsafe {
sol_sha256(
vals as *const _ as *const u8,
vals.len() as u64,
&mut hash_result as *mut _ as *mut u8,
);
}
Hash::new_from_array(hash_result)
}
}
/// Return a Sha256 hash for the given data.
pub fn hash(val: &[u8]) -> Hash {
hashv(&[val])
}
/// Return the hash of the given hash extended with the given value.
pub fn extend_and_hash(id: &Hash, val: &[u8]) -> Hash {
let mut hash_data = id.as_ref().to_vec();
hash_data.extend_from_slice(val);
hash(&hash_data)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_new_unique() {
assert!(Hash::new_unique() != Hash::new_unique());
}
#[test]
fn test_hash_fromstr() {
let hash = hash(&[1u8]);
let mut hash_base58_str = bs58::encode(hash).into_string();
assert_eq!(hash_base58_str.parse::<Hash>(), Ok(hash));
hash_base58_str.push_str(&bs58::encode(hash.0).into_string());
assert_eq!(
hash_base58_str.parse::<Hash>(),
Err(ParseHashError::WrongSize)
);
hash_base58_str.truncate(hash_base58_str.len() / 2);
assert_eq!(hash_base58_str.parse::<Hash>(), Ok(hash));
hash_base58_str.truncate(hash_base58_str.len() / 2);
assert_eq!(
hash_base58_str.parse::<Hash>(),
Err(ParseHashError::WrongSize)
);
let input_too_big = bs58::encode(&[0xffu8; HASH_BYTES + 1]).into_string();
assert!(input_too_big.len() > MAX_BASE58_LEN);
assert_eq!(
input_too_big.parse::<Hash>(),
Err(ParseHashError::WrongSize)
);
let mut hash_base58_str = bs58::encode(hash.0).into_string();
assert_eq!(hash_base58_str.parse::<Hash>(), Ok(hash));
// throw some non-base58 stuff in there
hash_base58_str.replace_range(..1, "I");
assert_eq!(
hash_base58_str.parse::<Hash>(),
Err(ParseHashError::Invalid)
);
}
}
| {
// At the time of this writing, the sha2 library is stuck on an old version
// of generic_array (0.9.0). Decouple ourselves with a clone to our version.
Hash(<[u8; HASH_BYTES]>::try_from(self.hasher.finalize().as_slice()).unwrap())
} |
crawl2.go | package main
import (
	"fmt"
	"log"

	"gopl.io/ch5/links" // provides links.Extract
)
// tokens is a counting semaphore used to
// enforce a limit of 20 concurrent requests. | func crawl(url string) []string {
fmt.Println(url)
tokens <- struct{}{} // acquire a token
list, err := links.Extract(url)
<-tokens // release the token
if err != nil {
log.Print(err)
}
return list
} | var tokens = make(chan struct{}, 20) |
error.rs | // Copyright 2018-2021 the Deno authors. All rights reserved. MIT license.
pub use anyhow::anyhow;
pub use anyhow::bail;
pub use anyhow::Context;
use std::borrow::Cow;
use std::error::Error;
use std::fmt;
use std::fmt::Debug;
use std::fmt::Display;
use std::fmt::Formatter;
/// A generic wrapper that can encapsulate any concrete error type.
pub type AnyError = anyhow::Error;
/// Creates a new error with a caller-specified error class name and message.
pub fn custom_error(
class: &'static str,
message: impl Into<Cow<'static, str>>,
) -> AnyError {
CustomError {
class,
message: message.into(),
}
.into()
}
pub fn generic_error(message: impl Into<Cow<'static, str>>) -> AnyError {
custom_error("Error", message)
}
pub fn type_error(message: impl Into<Cow<'static, str>>) -> AnyError {
custom_error("TypeError", message)
}
pub fn range_error(message: impl Into<Cow<'static, str>>) -> AnyError {
custom_error("RangeError", message)
}
pub fn invalid_hostname(hostname: &str) -> AnyError {
type_error(format!("Invalid hostname: '{}'", hostname))
}
pub fn uri_error(message: impl Into<Cow<'static, str>>) -> AnyError {
custom_error("URIError", message)
}
pub fn bad_resource(message: impl Into<Cow<'static, str>>) -> AnyError {
custom_error("BadResource", message)
}
pub fn bad_resource_id() -> AnyError {
custom_error("BadResource", "Bad resource ID")
}
pub fn not_supported() -> AnyError {
custom_error("NotSupported", "The operation is not supported")
}
pub fn resource_unavailable() -> AnyError {
custom_error(
"Busy",
"Resource is unavailable because it is in use by a promise",
)
}
/// A simple error type that lets the creator specify both the error message and
/// the error class name. This type is private; externally it only ever appears
/// wrapped in an `AnyError`. To retrieve the error class name from a wrapped
/// `CustomError`, use the function `get_custom_error_class()`.
#[derive(Debug)]
struct CustomError {
class: &'static str,
message: Cow<'static, str>,
}
impl Display for CustomError {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
f.write_str(&self.message)
}
}
impl Error for CustomError {}
/// If this error was created with `custom_error()`, return the specified error
/// class name. In all other cases this function returns `None`.
pub fn get_custom_error_class(error: &AnyError) -> Option<&'static str> {
error.downcast_ref::<CustomError>().map(|e| e.class)
}
/// A `JsError` represents an exception coming from V8, with stack frames and
/// line numbers. The deno_cli crate defines another `JsError` type, which wraps
/// the one defined here, that adds source map support and colorful formatting.
#[derive(Debug, PartialEq, Clone)]
pub struct JsError {
pub message: String,
pub source_line: Option<String>,
pub script_resource_name: Option<String>,
pub line_number: Option<i64>,
pub start_column: Option<i64>, // 0-based
pub end_column: Option<i64>, // 0-based
pub frames: Vec<JsStackFrame>,
pub stack: Option<String>,
}
#[derive(Debug, PartialEq, Clone, serde::Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct JsStackFrame {
pub type_name: Option<String>,
pub function_name: Option<String>,
pub method_name: Option<String>,
pub file_name: Option<String>,
pub line_number: Option<i64>,
pub column_number: Option<i64>,
pub eval_origin: Option<String>,
// Warning! isToplevel has inconsistent snake<>camel case, "typo" originates in v8:
// https://source.chromium.org/search?q=isToplevel&sq=&ss=chromium%2Fchromium%2Fsrc:v8%2F
#[serde(rename = "isToplevel")]
pub is_top_level: Option<bool>,
pub is_eval: bool,
pub is_native: bool,
pub is_constructor: bool,
pub is_async: bool,
pub is_promise_all: bool,
pub promise_index: Option<i64>,
}
impl JsStackFrame {
pub fn from_location(
file_name: Option<String>,
line_number: Option<i64>,
column_number: Option<i64>,
) -> Self {
Self {
type_name: None,
function_name: None, | line_number,
column_number,
eval_origin: None,
is_top_level: None,
is_eval: false,
is_native: false,
is_constructor: false,
is_async: false,
is_promise_all: false,
promise_index: None,
}
}
}
fn get_property<'a>(
scope: &mut v8::HandleScope<'a>,
object: v8::Local<v8::Object>,
key: &str,
) -> Option<v8::Local<'a, v8::Value>> {
let key = v8::String::new(scope, key).unwrap();
object.get(scope, key.into())
}
#[derive(serde::Deserialize)]
struct NativeJsError {
name: Option<String>,
message: Option<String>,
// Warning! .stack is special so handled by itself
// stack: Option<String>,
}
impl JsError {
pub(crate) fn create(js_error: Self) -> AnyError {
js_error.into()
}
pub fn from_v8_exception(
scope: &mut v8::HandleScope,
exception: v8::Local<v8::Value>,
) -> Self {
// Create a new HandleScope because we're creating a lot of new local
// handles below.
let scope = &mut v8::HandleScope::new(scope);
let msg = v8::Exception::create_message(scope, exception);
let (message, frames, stack) = if is_instance_of_error(scope, exception) {
// The exception is a JS Error object.
let exception: v8::Local<v8::Object> = exception.try_into().unwrap();
let e: NativeJsError =
serde_v8::from_v8(scope, exception.into()).unwrap();
// Get the message by formatting error.name and error.message.
let name = e.name.unwrap_or_else(|| "Error".to_string());
let message_prop = e.message.unwrap_or_else(|| "".to_string());
let message = if !name.is_empty() && !message_prop.is_empty() {
format!("Uncaught {}: {}", name, message_prop)
} else if !name.is_empty() {
format!("Uncaught {}", name)
} else if !message_prop.is_empty() {
format!("Uncaught {}", message_prop)
} else {
"Uncaught".to_string()
};
// Access error.stack to ensure that prepareStackTrace() has been called.
// This should populate error.__callSiteEvals.
let stack = get_property(scope, exception, "stack");
let stack: Option<v8::Local<v8::String>> =
stack.and_then(|s| s.try_into().ok());
let stack = stack.map(|s| s.to_rust_string_lossy(scope));
// Read an array of structured frames from error.__callSiteEvals.
let frames_v8 = get_property(scope, exception, "__callSiteEvals");
// Ignore non-array values
let frames_v8: Option<v8::Local<v8::Array>> =
frames_v8.and_then(|a| a.try_into().ok());
// Convert them into Vec<JsStackFrame>
let frames: Vec<JsStackFrame> = match frames_v8 {
Some(frames_v8) => serde_v8::from_v8(scope, frames_v8.into()).unwrap(),
None => vec![],
};
(message, frames, stack)
} else {
// The exception is not a JS Error object.
// Get the message given by V8::Exception::create_message(), and provide
// empty frames.
(msg.get(scope).to_rust_string_lossy(scope), vec![], None)
};
Self {
message,
script_resource_name: msg
.get_script_resource_name(scope)
.and_then(|v| v8::Local::<v8::String>::try_from(v).ok())
.map(|v| v.to_rust_string_lossy(scope)),
source_line: msg
.get_source_line(scope)
.map(|v| v.to_rust_string_lossy(scope)),
line_number: msg.get_line_number(scope).and_then(|v| v.try_into().ok()),
start_column: msg.get_start_column().try_into().ok(),
end_column: msg.get_end_column().try_into().ok(),
frames,
stack,
}
}
}
impl Error for JsError {}
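/// Formats a "file:line:column" source location string for stack-frame display.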
fn format_source_loc(
file_name: &str,
line_number: i64,
column_number: i64,
) -> String {
format!("{}:{}:{}", file_name, line_number, column_number)
}
impl Display for JsError {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
if let Some(stack) = &self.stack {
let stack_lines = stack.lines();
if stack_lines.count() > 1 {
return write!(f, "{}", stack);
}
}
write!(f, "{}", self.message)?;
if let Some(script_resource_name) = &self.script_resource_name {
if self.line_number.is_some() && self.start_column.is_some() {
let source_loc = format_source_loc(
script_resource_name,
self.line_number.unwrap(),
self.start_column.unwrap(),
);
write!(f, "\n at {}", source_loc)?;
}
}
Ok(())
}
}
pub(crate) fn attach_handle_to_error(
scope: &mut v8::Isolate,
err: AnyError,
handle: v8::Local<v8::Value>,
) -> AnyError {
ErrWithV8Handle::new(scope, err, handle).into()
}
/// Implements `value instanceof primordials.Error` in JS. Similar to
/// `Value::is_native_error()` but more closely matches the semantics
/// of `instanceof`. `Value::is_native_error()` also checks for static class
/// inheritance rather than just scanning the prototype chain, which doesn't
/// work with our WebIDL implementation of `DOMException`.
pub(crate) fn is_instance_of_error<'s>(
scope: &mut v8::HandleScope<'s>,
value: v8::Local<v8::Value>,
) -> bool {
if !value.is_object() {
return false;
}
let message = v8::String::empty(scope);
let error_prototype = v8::Exception::error(scope, message)
.to_object(scope)
.unwrap()
.get_prototype(scope)
.unwrap();
let mut maybe_prototype =
value.to_object(scope).unwrap().get_prototype(scope);
while let Some(prototype) = maybe_prototype {
if prototype.strict_equals(error_prototype) {
return true;
}
maybe_prototype = prototype
.to_object(scope)
.and_then(|o| o.get_prototype(scope));
}
false
}
// TODO(piscisaureus): rusty_v8 should implement the Error trait on
// values of type v8::Global<T>.
pub(crate) struct ErrWithV8Handle {
err: AnyError,
handle: v8::Global<v8::Value>,
}
impl ErrWithV8Handle {
pub fn new(
scope: &mut v8::Isolate,
err: AnyError,
handle: v8::Local<v8::Value>,
) -> Self {
let handle = v8::Global::new(scope, handle);
Self { err, handle }
}
pub fn get_handle<'s>(
&self,
scope: &mut v8::HandleScope<'s>,
) -> v8::Local<'s, v8::Value> {
v8::Local::new(scope, &self.handle)
}
}
unsafe impl Send for ErrWithV8Handle {}
unsafe impl Sync for ErrWithV8Handle {}
impl Error for ErrWithV8Handle {}
impl Display for ErrWithV8Handle {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
<AnyError as Display>::fmt(&self.err, f)
}
}
impl Debug for ErrWithV8Handle {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
<Self as Display>::fmt(self, f)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_bad_resource() {
let err = bad_resource("Resource has been closed");
assert_eq!(err.to_string(), "Resource has been closed");
}
#[test]
fn test_bad_resource_id() {
let err = bad_resource_id();
assert_eq!(err.to_string(), "Bad resource ID");
}
} | method_name: None,
file_name, |
recommendation-type.ts | /**
* Cloud Guard APIs
* A description of the Cloud Guard APIs
* OpenAPI spec version: 20200131
*
*
* NOTE: This class is auto generated by OracleSDKGenerator.
* Do not edit the class manually.
*
* Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
* This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
*/
import * as model from "../model";
import common = require("oci-common");
/**
* Recommendation types
**/
export enum RecommendationType {
DetectorProblems = "DETECTOR_PROBLEMS",
ResolvedProblems = "RESOLVED_PROBLEMS",
/**
* This value is used if a service returns a value for this enum that is not recognized by this | */
UnknownValue = "UNKNOWN_VALUE"
}
export namespace RecommendationType {
export function getJsonObj(obj: RecommendationType): RecommendationType {
return obj;
}
export function getDeserializedJsonObj(obj: RecommendationType): RecommendationType {
return obj;
}
} | * version of the SDK. |
test_internet_access.py | from idunn.blocks.services_and_information import InternetAccessBlock
def test_internet_access_block():
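    # A place reporting "wifi": "no" should produce no block at all.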
internet_access_block = InternetAccessBlock.from_es({"properties": {"wifi": "no"}}, lang="en")
assert internet_access_block is None
def | ():
internet_access_block = InternetAccessBlock.from_es(
{"properties": {"internet_access": "wlan"}}, lang="en"
)
assert internet_access_block == InternetAccessBlock(wifi=True)
| test_internet_access_block_ok |
role.controller.ts | import { Controller, Get, Post, Body, Put, Param } from '@nestjs/common';
import { RoleService } from './role.service';
import { CreateRoleDto } from './dto/create.role';
import { UpdateRoleDto } from './dto/update.role';
@Controller('role')
export class RoleController {
constructor(private readonly roleService: RoleService) {} | @Get()
findAll() {
return this.roleService.findAll();
}
@Post()
create(@Body() createRoleDto: CreateRoleDto) {
return this.roleService.create(createRoleDto);
}
@Put(':id')
update(@Body() updateRoleDto: UpdateRoleDto, @Param() id) {
return this.roleService.update(id, updateRoleDto);
}
} | |
punctuation.go | /*
* Copyright (c) 2021. -present, Broos Action, Inc. All rights reserved.
*
* This source code is licensed under the MIT license
* found in the LICENSE file in the root directory of this source tree.
*/
package sentences
// PunctStrings implements all the functions necessary for punctuation strings.
// They are used to detect punctuation in the sentence
// tokenizer.
type PunctStrings interface {
NonPunct() string
Punctuation() string
HasSentencePunct(string) bool
}
// DefaultPunctStrings are used to detect punctuation in the sentence
// tokenizer.
type DefaultPunctStrings struct{}
// NewPunctStrings creates a default set of properties
func NewPunctStrings() *DefaultPunctStrings |
// NonPunct regex string to detect non-punctuation.
func (p *DefaultPunctStrings) NonPunct() string {
return `[^\W\d]`
}
// Punctuation characters
func (p *DefaultPunctStrings) Punctuation() string {
return ";:,.!?"
}
// HasSentencePunct reports whether the supplied text contains a known sentence-ending punctuation character.
func (p *DefaultPunctStrings) HasSentencePunct(text string) bool {
endPunct := `.!?`
for _, char := range endPunct {
for _, achar := range text {
if char == achar {
return true
}
}
}
return false
}
int_test.go | package tests
import (
"context"
"testing"
"github.com/ClickHouse/clickhouse-go/v2"
"github.com/stretchr/testify/assert"
)
func TestSimpleInt(t *testing.T) {
var (
ctx = context.Background()
conn, err = clickhouse.Open(&clickhouse.Options{
Addr: []string{"127.0.0.1:9000"},
Auth: clickhouse.Auth{
Database: "default",
Username: "default",
Password: "",
},
Compression: &clickhouse.Compression{
Method: clickhouse.CompressionLZ4,
},
})
)
if assert.NoError(t, err) {
if err := checkMinServerVersion(conn, 21, 9, 0); err != nil {
t.Skip(err.Error())
return
}
const ddl = `
CREATE TABLE test_int (
Col1 Int64
) Engine Memory
`
defer func() {
conn.Exec(ctx, "DROP TABLE test_int")
}()
if err := conn.Exec(ctx, ddl); assert.NoError(t, err) {
if batch, err := conn.PrepareBatch(ctx, "INSERT INTO test_int"); assert.NoError(t, err) {
				// Int64 columns require int64 values, so appending an untyped int should fail
				assert.Error(t, batch.Append(222))
}
}
}
}
|
main.go | package main
import (
"fmt"
_ "github.com/ying32/govcl/pkgs/winappres"
"github.com/ying32/govcl/vcl"
"github.com/ying32/govcl/vcl/types"
"github.com/ying32/govcl/vcl/types/messages"
)
type TForm1 struct {
*vcl.TForm
}
var form1 *TForm1
func main() {
vcl.Application.Initialize()
vcl.Application.SetMainFormOnTaskBar(true)
vcl.Application.CreateForm(&form1)
vcl.Application.Run()
}
func (f *TForm1) OnFormCreate(sender vcl.IObject) {
form1.SetCaption("Message Test")
form1.SetPosition(types.PoScreenCenter)
form1.EnabledMaximize(false)
form1.SetWidth(500)
form1.SetHeight(400)
form1.SetOnWndProc(f.OnFormWndProc)
}
func (f *TForm1) OnFormWndProc(msg *types.TMessage) {
	// This call is required
	f.InheritedWndProc(msg)
	switch msg.Msg {
	case messages.WM_MOUSEMOVE:
	case messages.WM_LBUTTONDOWN:
		fmt.Println("left button pressed")
	case messages.WM_LBUTTONUP:
		fmt.Println("left button released")
	case messages.WM_LBUTTONDBLCLK:
		fmt.Println("left button double-clicked")
	case messages.WM_RBUTTONDOWN:
		fmt.Println("right button pressed")
	case messages.WM_RBUTTONUP:
		fmt.Println("right button released")
	case messages.WM_RBUTTONDBLCLK:
		fmt.Println("right button double-clicked")
}
}
create_database.go | package sqlite
import (
"context"
"strings"
"github.com/jmoiron/sqlx"
"github.com/oom-ai/oomstore/pkg/errdefs"
"github.com/oom-ai/oomstore/internal/database/dbutil"
"github.com/oom-ai/oomstore/pkg/oomstore/types"
)
func CreateDatabase(ctx context.Context, opt *types.SQLiteOpt) (err error) {
db, err := dbutil.OpenSQLite(opt.DBFile)
	if err != nil {
		return err
	}
defer db.Close()
return createMetaSchemas(ctx, db)
}
func createMetaSchemas(ctx context.Context, db *sqlx.DB) (err error) {
return dbutil.WithTransaction(db, ctx, func(ctx context.Context, tx *sqlx.Tx) error {
for _, schema := range META_TABLE_SCHEMAS {
if _, err = tx.ExecContext(ctx, schema); err != nil {
return errdefs.WithStack(err)
}
}
for _, schema := range META_VIEW_SCHEMAS {
if _, err = tx.ExecContext(ctx, schema); err != nil {
return errdefs.WithStack(err)
}
}
for table := range META_TABLE_SCHEMAS {
trigger := strings.ReplaceAll(TRIGGER_TEMPLATE, `{{TABLE_NAME}}`, table)
if _, err = tx.ExecContext(ctx, trigger); err != nil {
return errdefs.WithStack(err)
}
}
return nil
})
}
generalized_onpolicy_loss.py | # coding=utf-8
# Copyright 2019 The SEED Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements a generalized onpolicy loss."""
import abc
import inspect
import gin
from seed_rl.agents.policy_gradient.modules import logging_module
import tensorflow as tf
@gin.configurable
class GeneralizedOnPolicyLoss(tf.Module, logging_module.LoggingModule):
  """TensorFlow module implementing the generalized onpolicy loss."""
def __init__(self, agent, reward_normalizer, parametric_action_distribution,
advantage_estimator, policy_loss, discount_factor,
regularizer=None, max_abs_reward=None,
handle_abandoned_episodes_properly=True,
huber_delta=None, value_ppo_style_clip_eps=None,
baseline_cost=1., include_regularization_in_returns=False,
frame_skip=1, reward_scaling=1.0):
"""Creates a GeneralizedOnPolicyLoss."""
self._agent = agent
self._reward_normalizer = reward_normalizer
self._parametric_action_distribution = parametric_action_distribution
self._advantage_estimator = advantage_estimator
self._policy_loss = policy_loss
self._regularizer = regularizer
self._max_abs_reward = max_abs_reward
self._reward_scaling = reward_scaling
self._baseline_cost = baseline_cost
# Provided here so that it is shared.
self._discount_factor = discount_factor
self._frame_skip = frame_skip
self._handle_abandoned_episodes_properly = handle_abandoned_episodes_properly
self._value_ppo_style_clip_eps = value_ppo_style_clip_eps
self._include_regularization_in_returns = include_regularization_in_returns
if huber_delta is not None:
self.v_loss_fn = tf.keras.losses.Huber(
delta=huber_delta, reduction=tf.keras.losses.Reduction.NONE)
else:
self.v_loss_fn = tf.keras.losses.MeanSquaredError(
reduction=tf.keras.losses.Reduction.NONE)
def init(self):
for module in self.submodules:
if hasattr(module, 'init'):
if not inspect.signature(module.init).parameters:
module.init()
def compute_advantages(self, agent_state, prev_actions, env_outputs,
agent_outputs, return_learner_outputs=False):
# Extract rewards and done information.
rewards, done, _, abandoned, _ = tf.nest.map_structure(lambda t: t[1:],
env_outputs)
if self._max_abs_reward is not None:
rewards = tf.clip_by_value(rewards, -self._max_abs_reward,
self._max_abs_reward)
rewards *= self._reward_scaling
# Compute the outputs of the neural networks on the learner.
learner_outputs, _ = self._agent((prev_actions, env_outputs),
agent_state,
unroll=True,
is_training=True)
# At this point, we have unroll length + 1 steps. The last step is only used
# as bootstrap value, so it's removed.
agent_outputs = tf.nest.map_structure(lambda t: t[:-1], agent_outputs)
learner_v = learner_outputs.baseline # current value function
learner_outputs = tf.nest.map_structure(lambda t: t[:-1], learner_outputs)
target_action_log_probs = self._parametric_action_distribution(
learner_outputs.policy_logits).log_prob(agent_outputs.action)
behaviour_action_log_probs = self._parametric_action_distribution(
agent_outputs.policy_logits).log_prob(agent_outputs.action)
# Compute the advantages.
if self._reward_normalizer:
corrected_predictions = self._reward_normalizer.correct_prediction(
learner_v)
unnormalized_predictions = self._reward_normalizer.unnormalize_prediction(
corrected_predictions)
else:
corrected_predictions = learner_v
unnormalized_predictions = learner_v
if not self._handle_abandoned_episodes_properly:
abandoned = tf.zeros_like(abandoned)
done_terminated = tf.logical_and(done, ~abandoned)
done_abandoned = tf.logical_and(done, abandoned)
if self._include_regularization_in_returns and self._regularizer:
additional_rewards, _ = self._regularizer(
self._parametric_action_distribution,
learner_outputs.policy_logits,
agent_outputs.policy_logits,
agent_outputs.action, with_logging=False)
assert rewards.shape == additional_rewards.shape
rewards += additional_rewards
# tf.math.pow does not work on TPU so we compute it manually.
adjusted_discount_factor = 1.
for _ in range(self._frame_skip):
adjusted_discount_factor *= self._discount_factor
vs, advantages = self._advantage_estimator(
unnormalized_predictions,
rewards, done_terminated,
done_abandoned,
adjusted_discount_factor,
target_action_log_probs,
behaviour_action_log_probs)
if self._reward_normalizer:
normalized_targets = self._reward_normalizer.normalize_target(vs)
normalized_advantages = self._reward_normalizer.normalize_advantage(
advantages)
self._reward_normalizer.update_normalization_statistics(vs)
else:
normalized_targets = vs
normalized_advantages = advantages
outputs = (normalized_targets, normalized_advantages)
if return_learner_outputs:
outputs += (learner_outputs,)
return outputs
def __call__(self, agent_state, prev_actions, env_outputs, agent_outputs,
normalized_targets=None, normalized_advantages=None):
"""Computes the loss."""
if normalized_targets is None:
normalized_targets, normalized_advantages, learner_outputs = \
self.compute_advantages(
agent_state, prev_actions, env_outputs, agent_outputs,
return_learner_outputs=True)
# The last timestep is only used for computing advantages so we
# remove it here.
agent_state, prev_actions, env_outputs, agent_outputs = \
tf.nest.map_structure(
lambda t: t[:-1],
(agent_state, prev_actions, env_outputs, agent_outputs))
else: # Advantages are already precomputed.
learner_outputs, _ = self._agent((prev_actions, env_outputs),
agent_state,
unroll=True,
is_training=True)
target_action_log_probs = self._parametric_action_distribution(
learner_outputs.policy_logits).log_prob(agent_outputs.action)
behaviour_action_log_probs = self._parametric_action_distribution(
agent_outputs.policy_logits).log_prob(agent_outputs.action)
# Compute the advantages.
if self._reward_normalizer:
corrected_predictions = self._reward_normalizer.correct_prediction(
learner_outputs.baseline)
old_corrected_predictions = self._reward_normalizer.correct_prediction(
agent_outputs.baseline)
else:
corrected_predictions = learner_outputs.baseline
old_corrected_predictions = agent_outputs.baseline
# Compute the advantage-based loss.
policy_loss = tf.reduce_mean(
self._policy_loss(
normalized_advantages,
target_action_log_probs,
behaviour_action_log_probs,
actions=agent_outputs.action,
target_logits=learner_outputs.policy_logits,
behaviour_logits=agent_outputs.policy_logits,
parametric_action_distribution=self._parametric_action_distribution)
)
# Value function loss
v_error = normalized_targets - corrected_predictions
self.log('GeneralizedOnPolicyLoss/V_error', v_error)
self.log('GeneralizedOnPolicyLoss/abs_V_error', tf.abs(v_error))
self.log('GeneralizedOnPolicyLoss/corrected_predictions',
corrected_predictions)
# Huber loss reduces the last dimension so we add a dummy one here.
normalized_targets = normalized_targets[..., tf.newaxis]
corrected_predictions = corrected_predictions[..., tf.newaxis]
v_loss = self.v_loss_fn(normalized_targets, corrected_predictions)
# PPO-style value loss clipping
if self._value_ppo_style_clip_eps is not None:
old_corrected_predictions = old_corrected_predictions[..., tf.newaxis]
clipped_corrected_predictions = tf.clip_by_value(
corrected_predictions,
old_corrected_predictions - self._value_ppo_style_clip_eps,
old_corrected_predictions + self._value_ppo_style_clip_eps)
clipped_v_loss = self.v_loss_fn(normalized_targets,
clipped_corrected_predictions)
v_loss = tf.maximum(v_loss, clipped_v_loss)
v_loss = tf.reduce_mean(v_loss)
# Compute the regularization loss.
if self._regularizer:
per_step_regularization, regularization_loss = self._regularizer(
self._parametric_action_distribution,
learner_outputs.policy_logits,
agent_outputs.policy_logits,
agent_outputs.action)
if not self._include_regularization_in_returns:
regularization_loss += tf.reduce_mean(per_step_regularization)
else:
regularization_loss = 0.
total_loss = policy_loss + self._baseline_cost*v_loss + regularization_loss
    return total_loss
class PolicyLoss(tf.Module, metaclass=abc.ABCMeta):
  """Abstract base class for policy losses."""
  @abc.abstractmethod
  def __call__(self, advantages, target_action_log_probs,
               behaviour_action_log_probs):
    r"""Computes policy loss.
    Args:
      advantages: A float32 tensor of shape [T, B] of advantages.
      target_action_log_probs: A float32 tensor of shape [T, B] with
        log-probabilities of taking the action by the current policy
      behaviour_action_log_probs: A float32 tensor of shape [T, B] with
        log-probabilities of taking the action by the behavioural policy
    Returns:
      A float32 tensor of shape [T, B] with the policy loss.
    """
    raise NotImplementedError('`__call__()` is not implemented!')
class RegularizationLoss(tf.Module, metaclass=abc.ABCMeta):
  """Abstract base class for regularization losses."""
  @abc.abstractmethod
  def __call__(self, parametric_action_distribution, target_action_logits,
               behaviour_action_logits, actions):
    r"""Computes regularization loss.
    Args:
      parametric_action_distribution: Parametric action distribution.
      target_action_logits: A float32 tensor of shape [T, B, A] with
        the logits of the target policy.
      behaviour_action_logits: A float32 tensor of shape [T, B, A] with
        the logits of the behavioural policy.
      actions: A float32 tensor of shape [T, B, A] with the actions taken by the
        behaviour policy.
    Returns:
      A float32 tensor of shape [T, B] with the regularization loss.
    """
    raise NotImplementedError('`__call__()` is not implemented!')
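To make the PolicyLoss contract above concrete, here is a minimal sketch of a plain policy-gradient implementation. The class name and the swallowed extra keyword arguments (GeneralizedOnPolicyLoss also passes actions and logits to self._policy_loss) are illustrative assumptions, not part of the SEED codebase.
class SimplePolicyGradientLoss(PolicyLoss):
  """Sketch: REINFORCE-style loss, -advantage * log pi(a|s)."""
  def __call__(self, advantages, target_action_log_probs,
               behaviour_action_log_probs, **unused_kwargs):
    # Treat advantages as constants so gradients flow only through
    # the policy log-probabilities.
    advantages = tf.stop_gradient(advantages)
    return -advantages * target_action_log_probs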
0004_recipe_difficulty.py | # Generated by Django 3.2.4 on 2021-07-01 01:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('recipes', '0003_alter_recipe_description'),
]
operations = [
migrations.AddField(
model_name='recipe',
name='difficulty',
field=models.CharField(choices=[('easy', 'Easy'), ('medium', 'Medium'), ('hard', 'Hard')], default='easy', max_length=6),
        ),
    ]
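For reference, the model this migration targets would carry a field along these lines; only the difficulty field is taken from the migration itself, the rest of the Recipe model is assumed.
from django.db import models

class Recipe(models.Model):
    # Sketch: the real model's other fields (name, description, ...) are omitted.
    difficulty = models.CharField(
        choices=[('easy', 'Easy'), ('medium', 'Medium'), ('hard', 'Hard')],
        default='easy', max_length=6)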
|
loadsheet.py | #!/usr/bin/env python3
import ethercalc
import argparse
import pprint
import sys
parser = argparse.ArgumentParser(description="Dump ethercalc sheet")
parser.add_argument("sheet", metavar='sheet', help="sheet name")
parser.add_argument("-f", "--format", dest="format", |
e = ethercalc.EtherCalc("http://localhost:8000")
a = e.update(data, format=args.format, id=args.sheet) | help="format", default="socialcalc")
args = parser.parse_args()
data = sys.stdin.buffer.read() |
tcp_client_stream.rs | // Copyright 2015-2016 Benjamin Fry <[email protected]>
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use std::fmt::{self, Display};
#[cfg(feature = "tokio-runtime")]
use std::io;
use std::net::SocketAddr;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::Duration;
#[cfg(feature = "tokio-runtime")]
use async_trait::async_trait;
use futures_io::{AsyncRead, AsyncWrite};
use futures_util::{future::Future, stream::Stream, StreamExt, TryFutureExt};
use log::warn;
use crate::error::ProtoError;
#[cfg(feature = "tokio-runtime")]
use crate::iocompat::AsyncIo02As03;
use crate::tcp::{Connect, TcpStream};
use crate::xfer::{DnsClientStream, SerialMessage};
use crate::Time;
use crate::{BufDnsStreamHandle, DnsStreamHandle};
/// Tcp client stream
///
/// Use with `trust_dns_client::client::DnsMultiplexer` impls
#[must_use = "futures do nothing unless polled"]
pub struct TcpClientStream<S> {
tcp_stream: TcpStream<S>,
}
impl<S: Connect + 'static + Send> TcpClientStream<S> {
/// Constructs a new TcpStream for a client to the specified SocketAddr.
///
/// Defaults to a 5 second timeout
///
/// # Arguments
///
/// * `name_server` - the IP and Port of the DNS server to connect to
#[allow(clippy::new_ret_no_self)]
pub fn new<TE: 'static + Time>(
name_server: SocketAddr,
) -> (
TcpClientConnect<S::Transport>,
Box<dyn DnsStreamHandle + 'static + Send>,
) {
Self::with_timeout::<TE>(name_server, Duration::from_secs(5))
}
/// Constructs a new TcpStream for a client to the specified SocketAddr.
///
    /// # Arguments
    ///
/// * `name_server` - the IP and Port of the DNS server to connect to
/// * `timeout` - connection timeout
pub fn with_timeout<TE: 'static + Time>(
name_server: SocketAddr,
timeout: Duration,
) -> (
TcpClientConnect<S::Transport>,
Box<dyn DnsStreamHandle + 'static + Send>,
) {
let (stream_future, sender) = TcpStream::<S>::with_timeout::<TE>(name_server, timeout);
let new_future = Box::pin(
stream_future
.map_ok(move |tcp_stream| TcpClientStream { tcp_stream })
.map_err(ProtoError::from),
);
let sender = Box::new(BufDnsStreamHandle::new(name_server, sender));
(TcpClientConnect(new_future), sender)
}
}
impl<S: AsyncRead + AsyncWrite + Send> TcpClientStream<S> {
/// Wraps the TcpStream in TcpClientStream
pub fn from_stream(tcp_stream: TcpStream<S>) -> Self {
TcpClientStream { tcp_stream }
}
}
impl<S: AsyncRead + AsyncWrite + Send> Display for TcpClientStream<S> {
fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(formatter, "TCP({})", self.tcp_stream.peer_addr())
}
}
impl<S: AsyncRead + AsyncWrite + Send + Unpin> DnsClientStream for TcpClientStream<S> {
fn name_server_addr(&self) -> SocketAddr {
self.tcp_stream.peer_addr()
}
}
impl<S: AsyncRead + AsyncWrite + Send + Unpin> Stream for TcpClientStream<S> {
type Item = Result<SerialMessage, ProtoError>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
let message = try_ready_stream!(self.tcp_stream.poll_next_unpin(cx));
// this is busted if the tcp connection doesn't have a peer
let peer = self.tcp_stream.peer_addr();
if message.addr() != peer {
// TODO: this should be an error, right?
warn!("{} does not match name_server: {}", message.addr(), peer)
}
Poll::Ready(Some(Ok(message)))
}
}
// TODO: create unboxed future for the TCP Stream
/// A future that resolves to an TcpClientStream
pub struct TcpClientConnect<S>(
Pin<Box<dyn Future<Output = Result<TcpClientStream<S>, ProtoError>> + Send + 'static>>,
);
impl<S> Future for TcpClientConnect<S> {
type Output = Result<TcpClientStream<S>, ProtoError>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
self.0.as_mut().poll(cx)
}
}
#[cfg(feature = "tokio-runtime")]
use tokio::net::TcpStream as TokioTcpStream;
#[cfg(feature = "tokio-runtime")]
#[async_trait]
impl Connect for AsyncIo02As03<TokioTcpStream> {
type Transport = AsyncIo02As03<TokioTcpStream>;
async fn connect(addr: SocketAddr) -> io::Result<Self::Transport> {
TokioTcpStream::connect(&addr).await.map(AsyncIo02As03)
}
}
#[cfg(test)]
#[cfg(feature = "tokio-runtime")]
mod tests {
use super::AsyncIo02As03;
#[cfg(not(target_os = "linux"))]
use std::net::Ipv6Addr;
use std::net::{IpAddr, Ipv4Addr};
use tokio::net::TcpStream as TokioTcpStream;
use tokio::runtime::Runtime;
use crate::tests::tcp_client_stream_test;
use crate::TokioTime;
#[test]
fn test_tcp_stream_ipv4() {
let io_loop = Runtime::new().expect("failed to create tokio runtime");
tcp_client_stream_test::<AsyncIo02As03<TokioTcpStream>, Runtime, TokioTime>(
IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)),
io_loop,
)
}
#[test]
#[cfg(not(target_os = "linux"))] // ignored until Travis-CI fixes IPv6
fn test_tcp_stream_ipv6() {
let io_loop = Runtime::new().expect("failed to create tokio runtime");
tcp_client_stream_test::<AsyncIo02As03<TokioTcpStream>, Runtime, TokioTime>(
IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)),
io_loop,
)
}
}
get-entry.ts | import BackblazeB2Client from "../backblaze-b2/backblaze-b2-client";
const getEntry = (entryName: string) => {
const bbClient = new BackblazeB2Client();
return bbClient.getFile(entryName);
};
export default getEntry;
|
global_explainer.py | """
provide global explanation methods
author: Xiaoqi
date: 2019.10.29
"""
from .feature_importance import *
from .shap_explainer import ShapExplainer
class GlobalExplainer(object):
def __init__(self, x_train, y_train, model):
"""
Initialize a feature global explainer
:param x_train: input data
:param y_train: output data
:param model: the underlying black-box model to be interpreted
"""
self.x_train = x_train
self.y_train = y_train
self.model = model
def permutation_importance(self, use_eli5=False):
"""
Global variable influence measured by permutation importance
:param use_eli5: bool, if True, use the ELI5 implementation, otherwise the raw implementation
:return: feature importance ranking plot
"""
feature_imp = FeatureImportance(self.x_train, self.y_train, self.model)
if use_eli5:
return feature_imp.eli5_perm_importance()
else:
imp = feature_imp.permutation_importance()
return feature_imp.vis_perm_importance(imp)
def weights_importance(self):
"""
Global variable influence measured by feature weights
:return: an explanation of estimator parameters (weights)
"""
feature_imp = FeatureImportance(self.x_train, self.y_train, self.model)
return feature_imp.eli5_weights_importance(show=['feature_importances', 'target', 'description'])
def shap_feature_importance(self, explainer_type='Tree', background_sample=500):
"""
Global variable influence measured by SHAP feature importance (average absolute marginal
effect of each feature)
        :return: a summary plot visualized using SHAP
        """
        tree_shap = ShapExplainer(self.x_train, self.model, explainer_type=explainer_type,
                                  background_sample=background_sample)
        return tree_shap.shap_summary_plot(plot_type='bar')
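A minimal usage sketch for this class, assuming a fitted scikit-learn style estimator and in-memory training data; the variable names are illustrative.
from sklearn.ensemble import RandomForestRegressor

model = RandomForestRegressor().fit(x_train, y_train)  # x_train/y_train assumed to exist
explainer = GlobalExplainer(x_train, y_train, model)
explainer.permutation_importance(use_eli5=False)          # raw implementation + ranking plot
explainer.shap_feature_importance(explainer_type='Tree')  # SHAP summary bar plot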
user_login.rs | use crate::mail::Email;
use crate::render_template;
use crate::session::Session;
use crate::user::{User, WebUser};
use crate::{crypto, die, err};
use actix_identity::Identity;
use actix_web::http::header::LOCATION;
use actix_web::http::StatusCode;
use actix_web::{HttpRequest, HttpResponse, Responder, web};
use anyhow::Result;
use gitarena_macros::{from_config, route};
use serde::Deserialize;
use sqlx::PgPool;
use tera::Context;
use tracing_unwrap::OptionExt;
use log::debug;
#[route("/login", method = "GET", err = "html")]
pub(crate) async fn get_login(web_user: WebUser, db_pool: web::Data<PgPool>) -> Result<impl Responder> {
if matches!(web_user, WebUser::Authenticated(_)) {
        die!(UNAUTHORIZED, "Already logged in");
    }
let (allow_registrations, bitbucket_sso_enabled, github_sso_enabled, gitlab_sso_enabled): (bool, bool, bool, bool) = from_config!(
"allow_registrations" => bool,
"sso.bitbucket.enabled" => bool,
"sso.github.enabled" => bool,
"sso.gitlab.enabled" => bool
);
let mut context = Context::new();
context.try_insert("allow_registrations", &allow_registrations)?;
context.try_insert("sso_bitbucket", &bitbucket_sso_enabled)?;
context.try_insert("sso_github", &github_sso_enabled)?;
context.try_insert("sso_gitlab", &gitlab_sso_enabled)?;
render_template!("user/login.html", context)
}
#[route("/login", method = "POST", err = "html")]
pub(crate) async fn post_login(body: web::Form<LoginRequest>, request: HttpRequest, id: Identity, db_pool: web::Data<PgPool>) -> Result<impl Responder> {
let redirect = body.redirect.as_deref().unwrap_or("/");
// User is already logged in
if id.identity().is_some() {
return Ok(HttpResponse::Found().append_header((LOCATION, redirect)).finish());
}
// TODO: Maybe allow login with email address?
let username = &body.username;
let password = &body.password;
let mut context = Context::new();
context.try_insert("username", username.as_str())?;
context.try_insert("password", password.as_str())?;
context.try_insert("error", &true)?; // The login template only gets rendered if an error occurs
if username.is_empty() {
context.try_insert("username_error", "Username cannot be empty")?;
return render_template!(StatusCode::BAD_REQUEST, "user/login.html", context);
}
if password.is_empty() {
context.try_insert("password_error", "Password cannot be empty")?;
return render_template!(StatusCode::BAD_REQUEST, "user/login.html", context);
}
// We specify whenever a username does not exist or if the password was incorrect
// This is by design as one can check anytime by just going to /<username> or checking the sign-up form
// Please see https://meta.stackoverflow.com/q/308782
let mut transaction = db_pool.begin().await?;
let option: Option<User> = sqlx::query_as::<_, User>("select * from users where username = $1 limit 1")
.bind(username)
.fetch_optional(&mut transaction)
.await?;
if option.is_none() {
debug!("Received login request for non-existent user: {}", &username);
context.try_insert("username_error", "Username does not exist")?;
return render_template!(StatusCode::UNAUTHORIZED, "user/login.html", context, transaction);
}
let user = option.unwrap_or_log();
if user.password == "sso-login" {
debug!("Received login request for an {} (id {}) despite being registered with SSO", &user.username, &user.id);
context.try_insert("password_error", "Your account has been registered with SSO. Try using another login method below.")?;
return render_template!(StatusCode::UNAUTHORIZED, "user/login.html", context, transaction);
}
if !crypto::check_password(&user, password)? {
debug!("Received login request with wrong password for {} (id {})", &user.username, &user.id);
context.try_insert("password_error", "Incorrect password")?;
return render_template!(StatusCode::UNAUTHORIZED, "user/login.html", context, transaction);
}
let primary_email = Email::find_primary_email(&user, &mut transaction)
.await?
.ok_or_else(|| err!(UNAUTHORIZED, "No primary email"))?;
if user.disabled || !primary_email.is_allowed_login() {
debug!("Received login request for disabled user {} (id {})", &user.username, &user.id);
context.try_insert("general_error", "Account has been disabled. Please contact support.")?;
return render_template!(StatusCode::UNAUTHORIZED, "user/login.html", context, transaction);
}
let session = Session::new(&request, &user, &mut transaction).await?;
id.remember(session.to_string());
debug!("{} (id {}) logged in successfully", &user.username, &user.id);
transaction.commit().await?;
Ok(HttpResponse::Found().append_header((LOCATION, redirect)).finish())
}
#[derive(Deserialize)]
pub(crate) struct LoginRequest {
username: String,
password: String,
redirect: Option<String>
}
base.py | """
"""
import contextvars
from ..langref import ATOM_TYPES
__all__ = ['is_atom', 'context']
_CURRENT_CONTEXT = contextvars.ContextVar('current-runtime-context', default=None)
def is_atom(value):
"""
"""
return isinstance(value, ATOM_TYPES)
class _CurrentContextProxy:
__slots__ = ()
__getattr__ = lambda s, n: getattr(_CURRENT_CONTEXT.get(), n)
__setattr__ = lambda s, n, v: setattr(_CURRENT_CONTEXT.get(), n, v)
__delattr__ = lambda s, n: delattr(_CURRENT_CONTEXT.get(), n)
__getitem__ = lambda s, n: _CURRENT_CONTEXT.get().__getitem__(n)
__setitem__ = lambda s, n, v: _CURRENT_CONTEXT.get().__setitem__(n, v)
__delitem__ = lambda s, n: _CURRENT_CONTEXT.get().__delitem__(n)
__enter__ = lambda s: _CURRENT_CONTEXT.get().__enter__()
    __exit__ = lambda s, *exc: _CURRENT_CONTEXT.get().__exit__(*exc)
__contains__ = lambda s, n: _CURRENT_CONTEXT.get().__contains__(n)
__dir__ = lambda s: dir(_CURRENT_CONTEXT.get())
__call__ = lambda s, v: _CURRENT_CONTEXT.get()(v)
__str__ = lambda s: _CURRENT_CONTEXT.get().__str__()
    __repr__ = lambda s: _CURRENT_CONTEXT.get().__repr__()
    __bool__ = lambda s: _CURRENT_CONTEXT.get() is not None
context = _CurrentContextProxy()
"""The current :class:`~rollit.runtime.core.RuntimeContext`.
"""
del _CurrentContextProxy
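A small sketch of how the proxy behaves once a runtime context is installed; DummyContext is purely illustrative and not part of rollit.
class DummyContext:
    value = 42

_CURRENT_CONTEXT.set(DummyContext())
assert context.value == 42  # attribute access is forwarded to the active context
assert bool(context)        # truthy once a context has been set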
|
file_io.rs | //! file upload/download helper functions
use seed::prelude::*;
use wasm_bindgen::JsCast;
use wasm_bindgen_futures::JsFuture;
/// make the user download some data as a text file
pub fn download_text(filename: &str, data: &str) {
    let encoded_data: String = js_sys::encode_uri_component(&data).into();
    let mime = String::from("data:text/plain;charset=utf-8");
    let uri = &format!("{},{}", mime, encoded_data);
    let element = seed::document()
        .create_element("a")
        .expect("should be able to create element");
    let _ = element.set_attribute("href", uri);
    let _ = element.set_attribute("download", filename);
    let event = seed::document()
        .create_event("MouseEvents")
        .expect("should be able to call createEvent()")
        .dyn_into::<web_sys::MouseEvent>()
        .ok()
        .expect("should be a MouseEvent");
    event.init_mouse_event_with_can_bubble_arg_and_cancelable_arg("click", true, true);
    let _ = element.dispatch_event(&event);
    element.remove();
}
/// makes a file selector appear to let the user choose a file to upload
pub fn choose_upload(input_element_id: &str) {
let element = seed::document()
.get_element_by_id(input_element_id)
.unwrap();
// simulate a click on it
let event = seed::document()
.create_event("MouseEvents")
.expect("should be able to call createEvent()")
.dyn_into::<web_sys::MouseEvent>()
.ok()
.expect("should be a MouseEvent");
event.init_mouse_event_with_can_bubble_arg_and_cancelable_arg("click", true, true);
let _ = element.dispatch_event(&event);
}
/// starts a future to upload the text file and deliver it as a UploadText message
pub fn upload_file(event: web_sys::Event, orders: &mut impl Orders<super::Msg>) {
let target = event
.target()
.unwrap()
.dyn_into::<web_sys::HtmlInputElement>()
.ok()
.unwrap();
let file = target.files().unwrap().get(0).expect("should get a file");
orders.perform_cmd(async move {
let text = JsFuture::from(file.text())
.await
.expect("read file")
.as_string()
.expect("cast file text to String");
super::Msg::UploadText(text)
});
}
aloy_default.rs | use crate::artifacts::{Artifact, ArtifactSetName};
use crate::artifacts::effect_config::{ArtifactEffectConfig, ArtifactEffectConfigBuilder, ConfigArchaicPetra, ConfigRate};
use crate::attribute::SimpleAttributeGraph2;
use crate::character::{Character, CharacterName};
use crate::character::character_common_data::CharacterCommonData;
use crate::character::characters::aloy::Aloy;
use crate::character::skill_config::CharacterSkillConfig;
use crate::character::traits::CharacterTrait;
use crate::common::{Element, StatName};
use crate::damage::{DamageContext, SimpleDamageBuilder};
use crate::enemies::Enemy;
use crate::target_functions::target_function_meta::{TargetFunctionFor, TargetFunctionMeta, TargetFunctionMetaImage};
use crate::target_functions::target_function_opt_config::TargetFunctionOptConfig;
use crate::target_functions::{TargetFunction, TargetFunctionConfig, TargetFunctionName};
use crate::target_functions::target_function::TargetFunctionMetaTrait;
use crate::team::TeamQuantization;
use crate::weapon::Weapon;
use crate::weapon::weapon_common_data::WeaponCommonData;
pub struct AloyDefaultTargetFunction;
impl TargetFunctionMetaTrait for AloyDefaultTargetFunction {
#[cfg(not(target_family = "wasm"))]
const META_DATA: TargetFunctionMeta = TargetFunctionMeta {
name: TargetFunctionName::AloyDefault,
chs: "埃洛伊-「异界的救世主」",
description: "普通输出埃洛伊",
tags: "输出",
four: TargetFunctionFor::SomeWho(CharacterName::Aloy),
image: TargetFunctionMetaImage::Avatar
};
fn create(_character: &CharacterCommonData, _weapon: &WeaponCommonData, _config: &TargetFunctionConfig) -> Box<dyn TargetFunction> {
Box::new(AloyDefaultTargetFunction)
}
}
impl TargetFunction for AloyDefaultTargetFunction {
fn get_target_function_opt_config(&self) -> TargetFunctionOptConfig {
TargetFunctionOptConfig {
atk_fixed: 0.1,
atk_percentage: 1.0,
hp_fixed: 0.0,
hp_percentage: 0.0,
def_fixed: 0.0,
def_percentage: 0.0,
recharge: 0.0,
elemental_mastery: 0.0,
critical: 1.0,
critical_damage: 1.0,
healing_bonus: 0.0,
bonus_electro: 0.0,
bonus_pyro: 0.0,
bonus_hydro: 0.0,
bonus_anemo: 0.0,
bonus_cryo: 2.0,
bonus_geo: 0.0,
bonus_dendro: 0.0,
bonus_physical: 0.0,
sand_main_stats: vec![
StatName::ATKPercentage,
],
goblet_main_stats: vec![
StatName::CryoBonus,
StatName::ATKPercentage,
],
head_main_stats: vec![
StatName::CriticalRate,
StatName::CriticalDamage,
StatName::ATKPercentage,
],
set_names: Some(vec![
ArtifactSetName::ShimenawasReminiscence,
ArtifactSetName::GladiatorsFinale,
ArtifactSetName::WanderersTroupe,
]),
very_critical_set_names: None,
normal_threshold: TargetFunctionOptConfig::DEFAULT_NORMAL_THRESHOLD,
critical_threshold: TargetFunctionOptConfig::DEFAULT_CRITICAL_THRESHOLD,
very_critical_threshold: TargetFunctionOptConfig::DEFAULT_VERY_CRITICAL_THRESHOLD
}
}
fn get_default_artifact_config(&self, team_config: &TeamQuantization) -> ArtifactEffectConfig {
ArtifactEffectConfigBuilder::new()
.shimenawas_reminiscence(1.0)
.build()
}
fn target(&self, attribute: &SimpleAttributeGraph2, character: &Character<SimpleAttributeGraph2>, _weapon: &Weapon<SimpleAttributeGraph2>, _artifacts: &[&Artifact], enemy: &Enemy) -> f64 {
let context: DamageContext<'_, SimpleAttributeGraph2> = DamageContext {
character_common_data: &character.common_data,
attribute, enemy
};
type S = <Aloy as CharacterTrait>::DamageEnumType;
let config = CharacterSkillConfig::Aloy { coil_count: 4 };
let dmg_a = Aloy::damage::<SimpleDamageBuilder>(&context, S::Normal11, &config).normal.expectation;
dmg_a
}
}
index.js | 'use strict'
const Bunyan = require( 'bunyan' )
/**
* Instantiates a logger
* @class
*/
module.exports = class Logger extends Bunyan {
/**
* @constructs
* @param opts <Object> passed straight through to bunyan.Logger
*/
constructor( opts ) {
super( Object.assign({
name: 'koa'
}, opts || {} ))
}
/**
* Attaches the log instance to the koa instance
* @param opts <Object>
* @param as <String> append to context as this string
* @returns koa middleware function
*/
attach( opts ) {
opts = Object.assign({
as: 'logger'
}, opts || {} )
const logger = this
return async function( ctx, next ) {
if ( ctx[ opts.as ] ) {
console.warn( 'ctx.logger already exists' )
await next()
return
}
ctx[ opts.as ] = logger
await next()
}
}
/**
* Basic request log middleware
* @param opts <Object>
* @returns koa middleware function
*/
attachRequest( opts ) {
const logger = this
return async function( ctx, next ) {
let start = Date.now()
logger.info({
event: 'request',
method: ctx.method,
url: ctx.url
})
await next()
logger.info({
event: 'response',
method: ctx.method,
url: ctx.originalUrl,
status: ctx.status,
        delta: Date.now() - start
      })
    }
  }
}
mod.rs | //! # Basic usage
//!
//! Visiting the accessors of a glTF asset.
//!
//! ```
//! # fn run() -> Result<(), Box<std::error::Error>> {
//! # let gltf = gltf::Gltf::open("examples/Box.gltf")?;
//! for accessor in gltf.accessors() {
//! println!("Accessor #{}", accessor.index());
//! println!("offset: {}", accessor.offset());
//! println!("count: {}", accessor.count());
//! println!("data_type: {:?}", accessor.data_type());
//! println!("dimensions: {:?}", accessor.dimensions());
//! }
//! # Ok(())
//! # }
//! # fn main() {
//! # let _ = run().expect("runtime error");
//! # }
//! ```
//!
//! # Utility functions
//!
//! Reading the values from the `vec3` accessors of a glTF asset.
//!
//! ## Note
//!
//! The [`Iter`] utility is a low-level iterator intended for use in special
//! cases. The average user is expected to use reader abstractions such as
//! [`mesh::Reader`].
//!
//! [`Iter`]: struct.Iter.html
//! [`mesh::Reader`]: ../mesh/struct.Reader.html
//!
//! ```
//! # fn run() -> Result<(), Box<dyn std::error::Error>> {
//! # use gltf::accessor::{DataType, Dimensions, Iter};
//! let (gltf, buffers, _) = gltf::import("examples/Box.gltf")?;
//! let get_buffer_data = |buffer: gltf::Buffer| buffers.get(buffer.index()).map(|x| &*x.0);
//! for accessor in gltf.accessors() {
//! match (accessor.data_type(), accessor.dimensions()) {
//! (DataType::F32, Dimensions::Vec3) => {
//! let iter = Iter::<[f32; 3]>::new(accessor, get_buffer_data);
//! for item in iter {
//! println!("{:?}", item);
//! }
//! }
//! _ => {},
//! }
//! }
//! # Ok(())
//! # }
//! # fn main() {
//! # let _ = run().expect("runtime error");
//! # }
//! ```
use crate::{buffer, Document};
pub use json::accessor::ComponentType as DataType;
pub use json::accessor::Type as Dimensions;
/// Utility functions.
#[cfg(feature = "utils")]
pub mod util;
/// Contains data structures for sparse storage.
pub mod sparse;
#[cfg(feature = "utils")]
#[doc(inline)]
pub use self::util::{Item, Iter};
/// A typed view into a buffer view.
#[derive(Clone, Debug)]
pub struct Accessor<'a> {
/// The parent `Document` struct.
document: &'a Document,
/// The corresponding JSON index.
index: usize,
/// The corresponding JSON struct.
json: &'a json::accessor::Accessor,
}
impl<'a> Accessor<'a> {
/// Constructs an `Accessor`.
pub(crate) fn new(
document: &'a Document,
index: usize,
json: &'a json::accessor::Accessor,
) -> Self {
Self {
document,
index,
json,
}
}
/// Returns the internal JSON index.
pub fn index(&self) -> usize {
self.index
}
/// Returns the size of each component that this accessor describes.
pub fn size(&self) -> usize {
self.data_type().size() * self.dimensions().multiplicity()
}
/// Returns the buffer view this accessor reads from.
///
/// This may be `None` if the corresponding accessor is sparse.
pub fn view(&self) -> Option<buffer::View<'a>> {
self.json.buffer_view.map(|view| self.document.views().nth(view.value()).unwrap())
}
/// Returns the offset relative to the start of the parent buffer view in bytes.
pub fn offset(&self) -> usize {
self.json.byte_offset as usize
}
/// Returns the number of components within the buffer view - not to be confused
/// with the number of bytes in the buffer view.
pub fn count(&self) -> usize {
self.json.count as usize
}
/// Returns the data type of components in the attribute.
pub fn data_type(&self) -> DataType {
self.json.component_type.unwrap().0
}
/// Optional application specific data.
pub fn extras(&self) -> &'a json::Extras {
&self.json.extras
}
/// Specifies if the attribute is a scalar, vector, or matrix.
pub fn dimensions(&self) -> Dimensions {
self.json.type_.unwrap()
}
/// Returns the minimum value of each component in this attribute.
pub fn min(&self) -> Option<json::Value> {
self.json.min.clone()
}
/// Returns the maximum value of each component in this attribute.
pub fn max(&self) -> Option<json::Value> {
self.json.max.clone()
}
/// Optional user-defined name for this object.
#[cfg(feature = "names")]
    pub fn name(&self) -> Option<&'a str> {
        self.json.name.as_ref().map(String::as_str)
    }
/// Specifies whether integer data values should be normalized.
pub fn normalized(&self) -> bool {
self.json.normalized
}
/// Returns sparse storage of attributes that deviate from their initialization
/// value.
pub fn sparse(&self) -> Option<sparse::Sparse<'a>> {
self.json.sparse.as_ref().map(|json| {
sparse::Sparse::new(self.document, json)
})
}
}
py_lint_test.py | import fnmatch
import logging
import os
import subprocess
from kubeflow.testing import util
import pytest
logging.basicConfig(
level=logging.INFO,
format=('%(levelname)s|%(asctime)s'
'|%(pathname)s|%(lineno)d| %(message)s'),
datefmt='%Y-%m-%dT%H:%M:%S',
)
logging.getLogger().setLevel(logging.INFO)
def should_exclude(root, full_dir_excludes):
for e in full_dir_excludes:
if root.startswith(e):
return True
return False
def test_lint(record_xml_attribute, src_dir, rcfile): # pylint: disable=redefined-outer-name
# Override the classname attribute in the junit file.
# This makes it easy to group related tests in test grid.
# http://doc.pytest.org/en/latest/usage.html#record-xml-attribute
util.set_pytest_junit(record_xml_attribute, "test_py_lint")
logging.info('Running test_lint')
pylint_bin = "pylint"
# Print out the pylint version because different versions can produce
# different results.
util.run([pylint_bin, "--version"])
# kubeflow_testing is imported as a submodule so we should exclude it
# TODO(jlewi): We should make this an argument.
dir_excludes = [
"dashboard/frontend/node_modules",
"kubeflow_testing",
"dev-kubeflow-org/ks-app/vendor",
# TODO(https://github.com/kubeflow/testing/issues/560) stop skipping
# py/kubeflow/testing/cd once we update python & pylint so f style
# strings don't generate lint errors.
"kubeflow/testing",
"release-infra",
]
full_dir_excludes = [
os.path.join(os.path.abspath(src_dir), f) for f in dir_excludes
]
logging.info("Directories to be excluded: %s", ",".join(full_dir_excludes))
# TODO(jlewi): Use pathlib once we switch to python3.
includes = ["*.py"]
failed_files = []
if not rcfile:
rcfile = os.path.join(src_dir, ".pylintrc")
for root, dirs, files in os.walk(os.path.abspath(src_dir), topdown=True):
# Exclude vendor directories and all sub files.
if "vendor" in root.split(os.sep):
continue
# excludes can be done with fnmatch.filter and complementary set,
# but it's more annoying to read.
if should_exclude(root, full_dir_excludes):
logging.info("Skipping directory %s", root)
continue
dirs[:] = [d for d in dirs]
for pat in includes:
for f in fnmatch.filter(files, pat):
full_path = os.path.join(root, f)
        try:
          util.run(
[pylint_bin, "--rcfile=" + rcfile, full_path], cwd=src_dir)
except subprocess.CalledProcessError:
failed_files.append(full_path[len(src_dir):])
if failed_files:
failed_files.sort()
logging.error("%s files had lint errors:\n%s", len(failed_files),
"\n".join(failed_files))
else:
logging.info("No lint issues.")
assert not failed_files
if __name__ == "__main__":
logging.basicConfig(
level=logging.INFO,
format=('%(levelname)s|%(asctime)s'
'|%(pathname)s|%(lineno)d| %(message)s'),
datefmt='%Y-%m-%dT%H:%M:%S',
)
logging.getLogger().setLevel(logging.INFO)
  pytest.main()
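The test relies on src_dir and rcfile pytest fixtures supplied elsewhere; a conftest.py along these lines would provide them (the option names are assumptions based on the fixture names):
# conftest.py (sketch)
import pytest

def pytest_addoption(parser):
    parser.addoption("--src_dir", action="store", default=".")
    parser.addoption("--rcfile", action="store", default="")

@pytest.fixture
def src_dir(request):
    return request.config.getoption("--src_dir")

@pytest.fixture
def rcfile(request):
    return request.config.getoption("--rcfile")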
index.tsx | import React from 'react';
import { RectButtonProps } from 'react-native-gesture-handler';
import { Container, Title } from './styles';
interface Props extends RectButtonProps {
  title: string;
  onPress: () => void;
}
export function Button({
title,
onPress,
...rest
} : Props){
return(
<Container onPress={onPress} {...rest}>
<Title>
{ title }
</Title>
</Container>
);
}
format.rs | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
use serde_reflection::{ContainerFormat, Error, Format, FormatHolder, Named, VariantFormat};
#[test]
fn test_format_unification() {
use Format::*;
let mut x = Unknown;
assert!(x.unify(U8).is_ok());
assert_eq!(x, U8);
assert_eq!(
x.unify(U16).unwrap_err(),
Error::Incompatible("U8".into(), "U16".into())
);
let mut x = Tuple(vec![Unknown, U32]);
x.unify(Tuple(vec![U16, Unknown])).unwrap();
assert_eq!(x, Tuple(vec![U16, U32]));
for x in vec![
Unknown,
Unit,
Bool,
I8,
I16,
I32,
I64,
I128,
U8,
U16,
U32,
U64,
U128,
F32,
F64,
Char,
Str,
Bytes,
TypeName("foo".into()),
Option(Box::new(Unit)),
Seq(Box::new(Unit)),
Map {
key: Box::new(Unit),
value: Box::new(Unit),
},
Tuple(vec![Unknown]),
]
.iter_mut()
{
assert!(x.unify(x.clone()).is_ok());
assert_eq!(*x != Unknown, x.unify(TypeName("bar".into())).is_err());
assert_eq!(*x != Unknown, x.unify(Option(Box::new(U32))).is_err());
assert_eq!(*x != Unknown, x.unify(Seq(Box::new(U32))).is_err());
assert_eq!(*x != Unknown, x.unify(Tuple(vec![])).is_err());
}
}
#[test]
fn test_container_format_unification() {
use ContainerFormat::*;
use Format::*;
let mut x = TupleStruct(vec![Unknown, U32]);
x.unify(TupleStruct(vec![U16, Unknown])).unwrap();
assert_eq!(x, TupleStruct(vec![U16, U32]));
let mut x = Enum(
vec![(
0,
Named {
name: "foo".into(),
value: VariantFormat::Tuple(vec![Unknown]),
},
)]
.into_iter()
.collect(),
);
assert!(x
.unify(Enum(
vec![(
0,
Named {
name: "foo".into(),
value: VariantFormat::Unit,
}
)]
.into_iter()
.collect()
))
.is_err());
assert!(x
.unify(Enum(
vec![(
0,
Named {
name: "foo".into(),
value: VariantFormat::Tuple(vec![U8]),
}
)]
.into_iter()
.collect()
))
.is_ok());
// We don't check for name collisions in variants.
assert!(x
.unify(Enum(
vec![(
1,
Named {
name: "foo".into(),
value: VariantFormat::Unknown
}
)]
.into_iter()
.collect()
))
.is_ok());
for x in vec![
UnitStruct,
NewTypeStruct(Box::new(Unit)),
TupleStruct(vec![Unknown]),
Struct(vec![Named {
name: "foo".into(),
value: Unknown,
}]),
Enum(
vec![(
0,
Named {
name: "foo".into(),
value: VariantFormat::Unknown,
},
)]
.into_iter()
.collect(),
),
]
.iter_mut()
{
assert!(x.unify(x.clone()).is_ok());
assert!(x.unify(NewTypeStruct(Box::new(U8))).is_err());
assert!(x.unify(TupleStruct(vec![])).is_err());
assert!(x
.unify(Struct(vec![Named {
name: "bar".into(),
value: Unknown
}]))
.is_err());
assert!(x
.unify(Enum(
vec![(
0,
Named {
name: "bar".into(),
value: VariantFormat::Unknown
}
)]
.into_iter()
.collect()
))
.is_err());
}
}
|
main.rs | extern crate actix;
extern crate actix_web;
extern crate blog;
extern crate dotenv;
extern crate num_cpus;
use actix::{Addr, SyncArbiter, System};
use actix_web::{
fs, http,
http::{header, Method},
middleware::cors::Cors,
server, App, HttpRequest, HttpResponse, Result,
};
use blog::util::cookies::Cookies;
use dotenv::dotenv;
use std::env;
#[macro_use]
extern crate log;
use actix_web::error::ErrorInternalServerError;
use actix_web::middleware::session::{CookieSessionBackend, RequestSession, SessionStorage};
use actix_web::middleware::{Finished, Middleware, Response, Started};
use actix_web::{AsyncResponder, Error};
use blog::models::token::Token;
//use blog::util::get_identity_and_web_context;
use blog::util::postgresql_pool::DataBase;
use blog::util::redis_pool::Cache;
use blog::{Admin, AdminArticle, AdminUser, AppState, ArticleWeb, Tag, Visitor};
use futures::future::{ok, Future};
use futures::sink::Sink;
use std::sync::Arc;
use tera::Context;
use blog::util::get_identity_and_web_context;
use time::Duration;
pub struct Preprocess;
impl Middleware<AppState> for Preprocess {
fn start(&self, mut req: &HttpRequest<AppState>) -> Result<Started> {
info!("middleware-start");
if let Some(token) = Token::get_token(&req.session()) {
info!("SESSION value: {:?}", token);
req.extensions_mut().insert(token);
} /*else {
let t = Token::new();
req.session().set("token", t.clone());
t
};*/
info!("path: {:?}", req.path());
info!("method: {:?}", req.method());
let ctx = get_identity_and_web_context(req);
req.extensions_mut().insert(ctx);
// else {
// info!("NO-SESSION");
// let res = req.session().set("counter", 1);
// match res {
// Ok(_) => info!("success"),
// Err(e) => info!("set-session failed: {:?}", e),
// };
// }
// if let Some(token) = req.headers().get("x-token") {
// info!("token: {:?}", token);
// req.extensions_mut().insert();
// }
Ok(Started::Done)
}
fn response(&self, _req: &HttpRequest<AppState>, resp: HttpResponse) -> Result<Response> {
info!("middleware-response");
Ok(Response::Done(resp))
}
fn finish(&self, req: &HttpRequest<AppState>, resp: &HttpResponse) -> Finished {
info!("middleware-finish");
if let Ok(Some(result)) = req.session().get::<String>("token") {
info!("session value new: {:?}", result);
} else {
info!("get session value new failed");
}
Finished::Done
}
}
fn main() {
::std::env::set_var("RUST_LOG", "info");
    // Load environment variables
dotenv().ok();
// init logger
env_logger::init();
// let mut static_file_dir = env::var("CARGO_MANIFEST_DIR").unwrap();
    // VS Code debugging requires an absolute path; the CARGO_MANIFEST_DIR value cannot be read here
let mut static_file_dir = if cfg!(target_os = "macos") {
"/Users/iPersona/Documents/blog".to_owned()
} else {
"/home/omi/Documents/dev/blog".to_owned()
};
static_file_dir.push_str("/dist");
info!("static_file_dir: {}", static_file_dir);
let sys = System::new("example");
    // let cache_addr = SyncArbiter::start(num_cpus::get(), move || Cache::new());
// let db_addr = SyncArbiter::start(1 /*num_cpus::get()*/, move || DataBase::new());
// let cache_addr = Cache::new();
// let db_addr = DataBase::new();
server::new(move || {
let mut app = App::with_state(AppState {
// db: db_addr,
// cache: cache_addr,
db: DataBase::new(),
cache: Cache::new(),
});
app = AdminArticle::configure(app);
// app = Tag::configure(app);
// app = AdminUser::configure(app);
app = app
.middleware(SessionStorage::new(
CookieSessionBackend::signed(&[0; 32])
.name("blog_session")
.secure(false)
.max_age(Duration::from_std(std::time::Duration::from_secs(24 * 60 * 60)).unwrap())
))
.middleware(Preprocess);
app = app.handler(
"/static",
fs::StaticFiles::new(static_file_dir.as_str()).unwrap(),
);
app
})
.bind("0.0.0.0:8888")
.unwrap()
.start();
let _ = sys.run();
}
macros.go | package bigquery
import (
"errors"
"fmt"
"strings"
"github.com/grafana/grafana-plugin-sdk-go/backend/gtime"
"github.com/grafana/sqlds/v2"
)
func macroColumn(query *sqlds.Query, args []string) (string, error) {
	return "", errors.New("$__column macro is not supported")
}
func macroTable(query *sqlds.Query, args []string) (string, error) {
return "", errors.New("$__table macro is not supported")
}
func macroTimeGroup(query *sqlds.Query, args []string) (string, error) {
if len(args) < 2 {
return "", fmt.Errorf("%w: expected 2 arguments, received %d", errors.New("macro $__timeGroup needs time column and interval"), len(args))
}
timeVar := args[0]
intervalVar := strings.Trim(args[1], "'\"")
last := intervalVar[len(intervalVar)-1:]
// when month interval
if last == "M" {
return fmt.Sprintf("TIMESTAMP((PARSE_DATE(\"%%Y-%%m-%%d\",CONCAT( CAST((EXTRACT(YEAR FROM `%s`)) AS STRING),'-',CAST((EXTRACT(MONTH FROM `%s`)) AS STRING),'-','01'))))", timeVar, timeVar), nil
}
interval, err := gtime.ParseInterval(intervalVar)
if err != nil {
return "", fmt.Errorf("error parsing interval %v", intervalVar)
}
return fmt.Sprintf("TIMESTAMP_SECONDS(DIV(UNIX_SECONDS(`%s`), %v) * %v)", timeVar, interval.Seconds(), interval.Seconds()), nil
}
var macros = map[string]sqlds.MacroFunc{
"column": macroColumn,
"table": macroTable,
"timeGroup": macroTimeGroup,
}
func (s *BigQueryDatasource) Macros() sqlds.Macros {
return macros
}
land-kodeverk-mock.ts | import { KodeverkResponse } from '../../models/kodeverk';
const mockKodeverk = [
{ kodeRef: 'COL', beskrivelse: 'COLOMBIA' },
{ kodeRef: 'USA', beskrivelse: 'USA' },
{ kodeRef: 'ESP', beskrivelse: 'SPANIA' },
{ kodeRef: 'LVS', beskrivelse: 'LANGTVEKKISTAN' },
{ kodeRef: 'BMU', beskrivelse: 'BERMUDA' },
{ kodeRef: 'MRS', beskrivelse: 'MARS' },
{ kodeRef: 'NRN', beskrivelse: 'NARNIA' },
{ kodeRef: 'NOR', beskrivelse: 'NORGE' },
{ kodeRef: 'SYD', beskrivelse: 'SYDEN' },
{ kodeRef: 'UTL', beskrivelse: 'UTLANDET' }
];
export function mockLandKodeverk(): KodeverkResponse {
return {
kodeverk: mockKodeverk
  };
}
|
app.module.ts | import { Module, HttpModule } from '@nestjs/common';
import { AppController } from './app.controller';
import { RoutesService } from './routes-service';
import { ConcreteFactoryResolver } from './concrete-factory-resolver';
import { AdventureSearchFactory } from './concrete-factories/adventure-search-factory';
import { CitySearchFactory } from './concrete-factories/city-search-factory';
import { EnterpriseSearchFactory } from './concrete-factories/enterprise-search-factory';
@Module({
imports: [HttpModule],
controllers: [AppController],
providers: [
ConcreteFactoryResolver,
RoutesService,
AdventureSearchFactory,
CitySearchFactory,
EnterpriseSearchFactory,
],
})
export class AppModule {}
__init__.py | from AppVars import AppVars
from AppResources import AppResources
|
gp2y0a.py | #!/usr/bin/python
# Author: Zion Orent <[email protected]>
# Copyright (c) 2015 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import time, sys, signal, atexit
import pyupm_gp2y0a as upmGp2y0a
# Note, for the Grove 80cm version of this sensor, due to the way it is wired,
# you need to plug this into the A0 port, where it will use the available
# A1 pin for data.
# Instantiate a GP2Y0A on analog pin A1
myIRProximity = upmGp2y0a.GP2Y0A(1)
## Exit handlers ##
# This stops python from printing a stacktrace when you hit control-C
def SIGINTHandler(signum, frame):
raise SystemExit
# This lets you run code on exit,
# including functions from myIRProximity
def exitHandler():
print "Exiting"
sys.exit(0)
# Register exit handlers
atexit.register(exitHandler)
signal.signal(signal.SIGINT, SIGINTHandler)
# analog voltage, usually 3.3 or 5.0
GP2Y0A_AREF = 5.0;
SAMPLES_PER_QUERY = 20;
# The higher the voltage (closer to AREF) the closer the object is.
# NOTE: The measured voltage will probably not exceed 3.3 volts.
# Every second, print the averaged voltage value
# (averaged over 20 samples).
while (1):
print "AREF: {0}, Voltage value (higher means closer): {1}".format(
GP2Y0A_AREF,
myIRProximity.value(GP2Y0A_AREF, SAMPLES_PER_QUERY))
time.sleep(1)
synple.py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""Python wrapper for synspec
Calculation of synthetic spectra of stars and convolution with a rotational/Gaussian kernel.
Makes the use of synspec simpler, and retains the main functionalities (when used from
python). The command line interface is even simpler but fairly limited.
For information on
synspec visit http://nova.astro.umd.edu/Synspec43/synspec.html.
Example
-------
To compute the solar spectrum between 6160 and 6164 angstroms, using a model atmosphere in
the file sun.mod (provided with the distribution), with the output going into the file
sun.syn
$synple.py sun.mod 6160. 6164.
To force a micro of 1.1 km/s, and convolve the spectrum with a Gaussian kernel with a fwhm
of 0.1 angstroms
$synple.py sun.mod 6160. 6164. 1.1 0.1
To perform the calculations above in python and compare the emergent normalized profiles
>>> from synple import syn
>>> x, y, z = syn('sun.mod', (6160.,6164.))
>>> x2, y2, z2 = syn('sun.mod', (6160.,6164.), vmicro=1.1, fwhm=0.1)
in plain python
>>> import matplotlib.pyplot as plt
>>> plt.ion()
>>> plt.plot(x,y/z, x2, y2/z2)
or ipython
In [1]: %pylab
In [2]: plot(x,y/z, x2, y2/z2)
"""
import os
import sys
import subprocess
import numpy as np
import glob
import time
import copy
import gzip
from scipy import interpolate
import matplotlib.pyplot as plt
from itertools import product
#configuration
#synpledir = /home/callende/synple
synpledir = os.path.dirname(os.path.realpath(__file__))
#relative paths
modeldir = synpledir + "/models"
modelatomdir = synpledir + "/data"
linelistdir = synpledir + "/linelists"
bindir = synpledir + "/bin"
synspec = bindir + "/s54d"
rotin = bindir + "/rotin3"
#other stuff
clight = 299792.458
epsilon = 0.6 #clv coeff.
bolk = 1.38054e-16 # erg/ K
zero = " 0 "
one = " 1 "
two = " 2 "
def syn(modelfile, wrange, dw=None, strength=1e-4, vmicro=None, abu=None, \
linelist=['gfallx3_bpo.19','kmol3_0.01_30.20'], atom='ap18', vrot=0.0, fwhm=0.0, \
steprot=0.0, stepfwhm=0.0, clean=True, save=False, synfile=None,
compute=True, tmpdir=None):
"""Computes a synthetic spectrum
Interface to the fortran codes synspec/rotin that only requires two mandatory inputs:
a model atmosphere (modelfile) and the limits of the spectral range (wrange). The code
recognizes Kurucz, MARCS and Phoenix LTE model atmospheres. The sampling of the frequency
grid is chosen internally, but can also be set by adding a constant wavelength step (dw).
The abundances and microturbulence velocity can be set through the abu and vmicro
parameters, but default values will be taken from the model atmosphere. Rotational and
Gaussian broadening can be introduced (vrot and fwhm parameters). The computed spectrum
can be written to a file (save == True).
Parameters
----------
modelfile : str
file with a model atmosphere
wrange: tuple or list of two floats
initial and ending wavelengths (angstroms)
dw: float, optional
wavelength step for the output fluxes
this will be the maximum interval for the radiative
transfer, and will trigger interpolation at the end
(default is None for automatic selection)
strength: float, optional
threshold in the line-to-continuum opacity ratio for
selecting lines (default is 1e-4)
vmicro: float, optional
microturbulence (km/s)
(default is taken from the model atmosphere)
abu: array of floats (99 elements), optional
chemical abundances relative to hydrogen (N(X)/N(H))
(default taken from input model atmosphere)
linelist: array of str
filenames of the line lists, the first one corresponds to
the atomic lines and all the following ones (optional) to
molecular lines
(default ['gfallx3_bpo.19','kmol3_0.01_30.20'] from Allende Prieto+ 2018)
atom: str
'ap18' -- generic opacities used in Allende Prieto+ 2018
'yo19' -- restricted set for NLTE calculations for APOGEE 2019 (Osorio+ 2019)
'hhm' -- continuum opacity is simplified to H and H-
(default 'ap18')
vrot: float
projected rotational velocity (km/s)
(default 0.)
steprot: float
wavelength step for convolution with rotational kernel (angstroms)
set to 0. for automatic adjustment (default 0.)
fwhm: float
Gaussian broadening: macroturbulence, instrumental, etc. (angstroms)
(default 0.)
stepfwhm: float
wavelength step for Gaussian convolution (angstroms)
set to 0. for automatic adjustment (default 0.)
clean: bool
set to False to avoid the removal of the synspec
temporary files/links (default True)
save: bool
set to True to save the computed spectrum to a file (default False)
the root of the model atmosphere file, with an extension ".syn" will be used
but see the parameter synfile to change that
synfile: str
when save is True, this can be used to set the name of the output file
(default None)
compute: bool
set to False to skip the actual synspec run, triggering clean=False
(default True)
tmpdir: string
when is not None a temporary directory with this name will be created to store
the temporary synspec input/output files, and the synple log file (usually named
syn.log) will be named as tmpdir_syn.log.
Returns
-------
wave: numpy array of floats
wavelengths (angstroms)
flux: numpy array of floats
flux (H_lambda in ergs/s/cm2/A)
cont: numpy array of floats
continuum flux (same units as flux)
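Example
-------
A minimal usage sketch, assuming the model atmosphere file sun.mod (provided
with the distribution) is in the working directory or in modeldir:
>>> x, y, z = syn('sun.mod', (6160., 6164.))
>>> x2, y2, z2 = syn('sun.mod', (6160., 6164.), vmicro=1.1, fwhm=0.1)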
"""
#basic checks on the line list and model atmosphere
checksynspec(linelist,modelfile)
#read model atmosphere
atmostype, teff, logg, vmicro2, abu2, nd, atmos = read_model(modelfile)
if vmicro is None: vmicro = vmicro2
if abu is None: abu = abu2
if dw is None:
#space = 1e-2
space = np.mean(wrange) * np.sqrt(9.12e-15 * np.min(atmos['t']) + vmicro**2) / clight / 3.
else:
space = dw
#check input parameters are valid
imode = checkinput(wrange, vmicro, linelist)
print ('teff,logg,vmicro=',teff,logg,vmicro)
#print ('abu=',abu)
#print (len(abu))
#print ('nd=',nd)
#print ('linelist=',linelist)
#print ('wrange=',wrange)
logfile = 'syn.log'
if tmpdir is not None:
startdir = os.getcwd()
logfile = os.path.join(startdir,os.path.split(tmpdir)[-1]) + "_" + logfile
try:
os.mkdir(tmpdir)
except OSError:
print( "cannot create tmpdir %s " % (tmpdir) )
try:
os.chdir(tmpdir)
except OSError:
print("cannot enter tmpdir %s " % (tmpdir) )
cleanup()
writetas('tas',nd,linelist) #non-std param. file
write5(teff,logg,abu,atom) #abundance/opacity file
write8(teff,logg,nd,atmos,atmostype) #model atmosphere
write55(wrange,space,imode,2,strength,vmicro,linelist,atmostype) #synspec control file
create_links(linelist) #auxiliary data
if not compute:
wave = None
flux = None
cont = None
else:
synin = open('fort.5')
synout = open(logfile,'w')
start = time.time()
p = subprocess.Popen([synspec], stdin=synin, stdout=synout, stderr=synout)
p.wait()
synout.flush()
synout.close()
synin.close()
assert (os.path.isfile('fort.7')), 'Error: I cannot read the file *fort.7* in '+os.getcwd()+' -- looks like synspec has crashed, please look at '+logfile
assert (os.path.isfile('fort.17')), 'Error: I cannot read the file *fort.17* in '+os.getcwd()+' -- looks like synspec has crashed, please look at '+logfile
wave, flux = np.loadtxt('fort.7', unpack=True)
wave2, flux2 = np.loadtxt('fort.17', unpack=True)
if dw is None and fwhm <= 0. and vrot <= 0.: cont = np.interp(wave, wave2, flux2)
end = time.time()
print('syn elapsed time ',end - start, 'seconds')
if fwhm > 0. or vrot > 0.:
start = time.time()
print( vrot, fwhm, space, steprot, stepfwhm)
wave, flux = call_rotin (wave, flux, vrot, fwhm, space, steprot, stepfwhm, clean=False, reuseinputfiles=True)
if dw is None: cont = np.interp(wave, wave2, flux2)
end = time.time()
print('convol elapsed time ',end - start, 'seconds')
if dw is not None:
nsamples = int((wrange[1] - wrange[0])/dw) + 1
wave3 = np.arange(nsamples)*dw + wrange[0]
#flux = np.interp(wave3, wave, flux)
flux = interp_spl(wave3, wave, flux)
cont = np.interp(wave3, wave2, flux2)
wave = wave3
if clean: cleanup()
if tmpdir is not None:
try:
os.chdir(startdir)
except OSError:
print("cannot change directory from tmpdir %s to startdir %s" % (tmpdir,startdir) )
if clean:
try:
os.rmdir(tmpdir)
except OSError:
print("cannot remove directory tmpdir %s" % (tmpdir) )
if save:
if synfile is None:
tmpstr = os.path.split(modelfile)[-1]
synfile = tmpstr[:tmpstr.rfind('.')]+'.syn'
np.savetxt(synfile,(wave,flux,cont))
return(wave, flux, cont)
def mpsyn(modelfile, wrange, dw=None, strength=1e-4, vmicro=None, abu=None, \
linelist=['gfallx3_bpo.19','kmol3_0.01_30.20'],atom='ap18', vrot=0.0, fwhm=0.0, \
steprot=0.0, stepfwhm=0.0, clean=True, save=False, synfile=None,
compute=True, nthreads=1):
"""Computes a synthetic spectrum, splitting the spectral range in nthreads parallel calculations
Wrapper for syn, using multiprocessing, to speed-up the calculation of a broad spectral range
Parameters
----------
modelfile : str
file with a model atmosphere
wrange: tuple or list of two floats
initial and ending wavelengths (angstroms)
dw: float, optional
wavelength step for the output fluxes
this will be the maximum interval for the radiative
transfer, and will trigger interpolation at the end
(default is None for automatic selection)
strength: float, optional
threshold in the line-to-continuum opacity ratio for
selecting lines (default is 1e-4)
vmicro: float, optional
microturbulence (km/s)
(default is taken from the model atmosphere)
abu: array of floats (99 elements), optional
chemical abundances relative to hydrogen (N(X)/N(H))
(default taken from input model atmosphere)
linelist: array of str
filenames of the line lists, the first one corresponds to
the atomic lines and all the following ones (optional) to
molecular lines
(default ['gfallx3_bpo.19','kmol3_0.01_30.20'] from Allende Prieto+ 2018)
atom: str
'ap18' -- generic opacities used in Allende Prieto+ 2018
'yo19' -- restricted set for NLTE calculations for APOGEE 2019 (Osorio+ 2019)
'hhm' -- continuum opacity is simplified to H and H-
(default 'ap18')
vrot: float
projected rotational velocity (km/s)
(default 0.)
steprot: float
wavelength step for convolution with rotational kernel (angstroms)
set to 0. for automatic adjustment (default 0.)
fwhm: float
Gaussian broadening: macroturbulence, instrumental, etc. (angstroms)
(default 0.)
stepfwhm: float
wavelength step for Gaussian convolution (angstroms)
set to 0. for automatic adjustment (default 0.)
clean: bool
set to False to avoid the removal of the synspec
temporary files/links (default True)
save: bool
set to True to save the computed spectrum to a file (default False)
the root of the model atmosphere file, with an extension ".syn" will be used
but see the parameter synfile to change that
synfile: str
when save is True, this can be used to set the name of the output file
(default None)
compute: bool
set to False to skip the actual synspec run, triggering clean=False
(default True)
nthreads: int
choose the number of cores to use in the calculation
(default 1; 0 means the code should use all the cores available)
Returns
-------
wave: numpy array of floats
wavelengths (angstroms)
flux: numpy array of floats
flux (H_lambda in ergs/s/cm2/A)
cont: numpy array of floats
continuum flux (same units as flux)
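Example
-------
A sketch of a four-core run, assuming sun.mod is available as in the syn example:
>>> x, y, z = mpsyn('sun.mod', (6160., 6190.), nthreads=4)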
"""
from multiprocessing import Pool,cpu_count
if nthreads == 0:
nthreads = cpu_count()
delta = (wrange[1]-wrange[0])/nthreads
pars = []
for i in range(nthreads):
wrange1 = (wrange[0]+delta*i,wrange[0]+delta*(i+1))
pararr = [modelfile, wrange1, dw, strength, vmicro, abu, \
linelist, atom, vrot, fwhm, \
steprot, stepfwhm, clean, save, synfile,
compute, 'par'+str(i) ]
pars.append(pararr)
pool = Pool(nthreads)
results = pool.starmap(syn,pars)
pool.close()
pool.join()
x = results[0][0]
y = results[0][1]
z = results[0][2]
if len(results) > 1:
for i in range(len(results)-1):
x = np.concatenate((x, results[i+1][0][1:]) )
y = np.concatenate((y, results[i+1][1][1:]) )
z = np.concatenate((z, results[i+1][2][1:]) )
return(x,y,z)
def raysyn(modelfile, wrange, dw=None, strength=1e-4, vmicro=None, abu=None, \
linelist=['gfallx3_bpo.19','kmol3_0.01_30.20'], atom='ap18', vrot=0.0, fwhm=0.0, \
steprot=0.0, stepfwhm=0.0, clean=True, save=False, synfile=None,
compute=True, nthreads=1):
"""Computes a synthetic spectrum, splitting the spectral range in nthreads parallel calculations
Wrapper for syn, using ray, to speed-up the calculation of a broad spectral range
Parameters
----------
modelfile : str
file with a model atmosphere
wrange: tuple or list of two floats
initial and ending wavelengths (angstroms)
dw: float, optional
wavelength step for the output fluxes
this will be the maximum interval for the radiative
transfer, and will trigger interpolation at the end
(default is None for automatic selection)
strength: float, optional
threshold in the line-to-continuum opacity ratio for
selecting lines (default is 1e-4)
vmicro: float, optional
microturbulence (km/s)
(default is taken from the model atmosphere)
abu: array of floats (99 elements), optional
chemical abundances relative to hydrogen (N(X)/N(H))
(default taken from input model atmosphere)
linelist: array of str
filenames of the line lists, the first one corresponds to
the atomic lines and all the following ones (optional) to
molecular lines
(default ['gfallx3_bpo.19','kmol3_0.01_30.20'] from Allende Prieto+ 2018)
atom: str
'ap18' -- generic opacities used in Allende Prieto+ 2018
'yo19' -- restricted set for NLTE calculations for APOGEE 2019 (Osorio+ 2019)
'hhm' -- continuum opacity is simplified to H and H-
(default 'ap18')
vrot: float
projected rotational velocity (km/s)
(default 0.)
steprot: float
wavelength step for convolution with rotational kernel (angstroms)
set to 0. for automatic adjustment (default 0.)
fwhm: float
Gaussian broadening: macroturbulence, instrumental, etc. (angstroms)
(default 0.)
stepfwhm: float
wavelength step for Gaussian convolution (angstroms)
set to 0. for automatic adjustment (default 0.)
clean: bool
set to False to avoid the removal of the synspec
temporary files/links (default True)
save: bool
set to True to save the computed spectrum to a file (default False)
the root of the model atmosphere file, with an extension ".syn" will be used
but see the parameter synfile to change that
synfile: str
when save is True, this can be used to set the name of the output file
(default None)
compute: bool
set to False to skip the actual synspec run, triggering clean=False
(default True)
nthreads: int
choose the number of cores to use in the calculation
(default 1; 0 means the code should use all the cores available)
Returns
-------
wave: numpy array of floats
wavelengths (angstroms)
flux: numpy array of floats
flux (H_lambda in ergs/s/cm2/A)
cont: numpy array of floats
continuum flux (same units as flux)
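Example
-------
Analogous to the mpsyn example, but parallelized with ray (assumes the ray
and psutil packages are installed and sun.mod is available):
>>> x, y, z = raysyn('sun.mod', (6160., 6190.), nthreads=4)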
"""
import psutil
import ray
@ray.remote
def fun(vari,cons):
wrange,tmpdir = vari
modelfile,dw,strength,vmicro,abu,linelist, \
atom,vrot,fwhm,steprot,stepfwhm,clean,save,synfile,compute = cons
x, y, z = syn(modelfile, wrange, dw, strength, vmicro, abu, \
linelist, atom, vrot, fwhm, \
steprot, stepfwhm, clean, save, synfile,
compute, tmpdir)
return(x,y,z)
if nthreads == 0:
nthreads = psutil.cpu_count(logical=False)
print('nthreads=',nthreads)
ray.init(num_cpus=nthreads)
rest = [ modelfile,dw,strength,vmicro,abu,linelist, \
atom,vrot,fwhm,steprot,stepfwhm,clean,save,synfile,compute ]
constants = ray.put(rest)
delta = (wrange[1]-wrange[0])/nthreads
pars = []
for i in range(nthreads):
wrange1 = (wrange[0]+delta*i,wrange[0]+delta*(i+1))
folder = 'par'+str(i)
pararr = [wrange1, folder ]
pars.append(pararr)
results = ray.get([fun.remote(pars[i],constants) for i in range(nthreads)])
x = results[0][0]
y = results[0][1]
z = results[0][2]
if len(results) > 1:
for i in range(len(results)-1):
x = np.concatenate((x, results[i+1][0][1:]) )
y = np.concatenate((y, results[i+1][1][1:]) )
z = np.concatenate((z, results[i+1][2][1:]) )
return(x,y,z)
def multisyn(modelfiles, wrange, dw=None, strength=1e-4, abu=None, \
vmicro=None, vrot=0.0, fwhm=0.0, nfe=0.0, \
linelist=['gfallx3_bpo.19','kmol3_0.01_30.20'], atom='ap18', \
steprot=0.0, stepfwhm=0.0, clean=True, save=None, nthreads=1):
"""Computes synthetic spectra for a list of files. The values of vmicro, vrot,
fwhm, and nfe can be iterables. Whether or not dw is specified, the results will be
placed on a common wavelength scale by interpolation. When not specified, dw will be
chosen as appropriate for the first model in modelfiles.
Parameters
----------
modelfiles : list of str
files with model atmospheres
wrange: tuple or list of two floats
initial and ending wavelengths (angstroms)
dw: float
wavelength step for the output fluxes.
Unlike in 'syn' this will not be used to set the maximum wavelength step for
synthesizing any of the spectra; the appropriate step will be chosen dynamically.
Unlike in 'syn', interpolation to a constant step will always be done
(default is None for automatic selection based on the first model of the list)
strength: float, optional
threshold in the line-to-continuum opacity ratio for
selecting lines (default is 1e-4)
abu: array of floats (99 elements), optional
chemical abundances relative to hydrogen (N(X)/N(H))
(default taken from input model atmosphere)
vmicro: float, optional, can be an iterable
microturbulence (km/s)
(default is taken from the model atmosphere)
vrot: float, can be an iterable
projected rotational velocity (km/s)
(default 0.)
fwhm: float, can be an iterable
Gaussian broadening: macroturbulence, instrumental, etc. (angstroms)
(default 0.)
nfe: float, can be an iterable
[N/Fe] nitrogen abundance change from the one specified in the array 'abu' (dex)
(default 0.)
linelist: array of str
filenames of the line lists, the first one corresponds to
the atomic lines and all the following ones (optional) to
molecular lines
(default ['gfallx3_bpo.19','kmol3_0.01_30.20'] from Allende Prieto+ 2018)
atom: str
'ap18' -- generic opacities used in Allende Prieto+ 2018
'yo19' -- restricted set for NLTE calculations for APOGEE 2019 (Osorio+ 2019)
'hhm' -- continuum opacity is simplified to H and H-
(default 'ap18')
steprot: float
wavelength step for convolution with rotational kernel (angstroms)
set to 0. for automatic adjustment (default 0.)
stepfwhm: float
wavelength step for Gaussian convolution (angstroms)
set to 0. for automatic adjustment (default 0.)
clean: bool
set to False to avoid the removal of the synspec
temporary files/links (default True)
save: bool
set to True to save the computed spectra to files
the root of the model atmosphere file, with an extension ".syn" will be used
if multiple values of vmicro, vrot, fwhm or nfe are used, their values are
prepended to the file names
(default None)
nthreads: int
choose the number of cores to use in the calculation
(default 1; 0 means the code should use all the cores available)
Returns
-------
wave: numpy array of floats (1D)
wavelengths (angstroms)
flux: numpy array of floats (2D -- as many rows as models input)
flux (H_lambda in ergs/s/cm2/A)
cont: numpy array of floats (2D -- as many rows as models input)
continuum flux (same units as flux)
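Example
-------
A sketch for two (hypothetical) models and two microturbulence values; flux
and cont get one row per model/parameter combination:
>>> wave, flux, cont = multisyn(['sun.mod', 'star2.mod'], (6160., 6164.), vmicro=[1.0, 2.0])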
"""
#when vmicro, vrot, fwhm or nitrogen are not iterables, we create ones, otherwise we copy them
try:
nvmicro = len(vmicro)
vmicros = vmicro
except TypeError:
nvmicro = 1
vmicros = [ vmicro ]
try:
nvrot = len(vrot)
vrots = vrot
except TypeError:
nvrot = 1
vrots = [ vrot ]
try:
nfwhm = len(fwhm)
fwhms = fwhm
except TypeError:
nfwhm = 1
fwhms = [ fwhm ]
try:
nnfe = len(nfe)
nfes = nfe
except TypeError:
nnfe = 1
nfes = [ nfe ]
assert (len(modelfiles) > 0), 'multisyn needs at least one model to work with'
wave = None
flux = None
cont = None
for entry in modelfiles:
for vmicro1 in vmicros:
for nfe1 in nfes:
abu1 = copy.copy(abu)
#if need be, adjust nitrogen abundance according to nfe
if (abs(nfe1) > 1e-7):
if abu1 is None:
checksynspec(linelist,entry)
atmostype, teff, logg, vmicro2, abu1, nd, atmos = read_model(entry)
abu1[6] = abu1[6] * 10.**nfe1
x, y, z = mpsyn(entry, wrange, dw=None, strength=strength, \
vmicro=vmicro1, abu=abu1, linelist=linelist, atom=atom, \
clean=clean, save=save, nthreads=nthreads)
space = np.mean(np.diff(x))
for vrot1 in vrots:
for fwhm1 in fwhms:
if fwhm1> 0. or vrot1 > 0.:
start = time.time()
print( entry, vmicro1, nfe1, vrot1, fwhm1, space)
x2, y2 = call_rotin (x, y, vrot1, fwhm1, space, steprot, stepfwhm, \
clean=False, reuseinputfiles=True)
z2 = np.interp(x2, x, z)
end = time.time()
print('convol elapsed time ',end - start, 'seconds')
else:
x2, y2, z2 = x, y, z
if entry == modelfiles[0] and vmicro1 == vmicros[0] and vrot1 == vrots[0] and fwhm1 == fwhms[0] and nfe1 == nfes[0]:
if dw is None: dw = np.median(np.diff(x2))
nsamples = int((wrange[1] - wrange[0])/dw) + 1
wave = np.arange(nsamples)*dw + wrange[0]
#flux = np.interp(wave, x2, y2)
flux = interp_spl(wave, x2, y2)
cont = np.interp(wave, x2, z2)
else:
#flux = np.vstack ( (flux, np.interp(wave, x2, y2) ) )
flux = np.vstack ( (flux, interp_spl(wave, x2, y2) ) )
cont = np.vstack ( (cont, np.interp(wave, x2, z2) ) )
return(wave, flux, cont)
def polysyn(modelfiles, wrange, dw=None, strength=1e-4, abu=None, \
vmicro=None, vrot=0.0, fwhm=0.0, nfe=0.0, \
linelist=['gfallx3_bpo.19','kmol3_0.01_30.20'],atom='ap18', \
steprot=0.0, stepfwhm=0.0, clean=True, save=None):
"""Sets up a directory tree for computing synthetic spectra for a list of files in
parallel. The values of vmicro, vrot, fwhm, and nfe can be iterables. Whether or not
dw is specified, the results will be placed on a common wavelength scale by interpolation.
When not specified, dw will be chosen as appropriate for the first model in modelfiles.
Parameters
----------
modelfiles : list of str
files with model atmospheres
wrange: tuple or list of two floats
initial and ending wavelengths (angstroms)
dw: float
Unlike in 'syn' this will not be used to set the maximum wavelength step for
synthesizing any of the spectra; the appropriate step will be chosen dynamically.
Unlike in 'syn', interpolation to a constant step will always be done
(default is None for automatic selection based on the first model of the list)
strength: float, optional
threshold in the line-to-continuum opacity ratio for
selecting lines (default is 1e-4)
abu: array of floats (99 elements), optional
chemical abundances relative to hydrogen (N(X)/N(H))
(default taken from input model atmosphere)
vmicro: float, optional, can be an iterable
microturbulence (km/s)
(default is taken from the model atmosphere)
vrot: float, can be an iterable
projected rotational velocity (km/s)
(default 0.)
fwhm: float, can be an iterable
Gaussian broadening: macroturbulence, instrumental, etc. (angstroms)
(default 0.)
nfe: float, can be an iterable
[N/Fe] nitrogen abundance change from the one specified in the array 'abu' (dex)
(default 0.)
linelist: array of str
filenames of the line lists, the first one corresponds to
the atomic lines and all the following ones (optional) to
molecular lines
(default ['gfallx3_bpo.19','kmol3_0.01_30.20'] from Allende Prieto+ 2018)
atom: str
'ap18' -- generic opacities used in Allende Prieto+ 2018
'yo19' -- restricted set for NLTE calculations for APOGEE 2019 (Osorio+ 2019)
'hhm' -- continuum opacity is simplified to H and H-
(default 'ap18')
steprot: float
wavelength step for convolution with rotational kernel (angstroms)
set to 0. for automatic adjustment (default 0.)
stepfwhm: float
wavelength step for Gaussian convolution (angstroms)
set to 0. for automatic adjustment (default 0.)
clean: bool
set to False to avoid the removal of the synspec
temporary files/links (default True)
save: bool
set to True to save the computed spectra to files
the root of the model atmosphere file, with an extension ".syn" will be used
if multiple values of vmicro, vrot, fwhm or nfe are used, their values are
prepended to the file names
(default None)
Returns
-------
wave: numpy array of floats (1D)
wavelengths (angstroms)
flux: numpy array of floats (2D -- as many rows as models input)
flux (H_lambda in ergs/s/cm2/A)
cont: numpy array of floats (2D -- as many rows as models input)
continuum flux (same units as flux)
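Example
-------
A sketch that creates one hyd* directory with a slurm job script per
model/parameter combination (nothing is computed until the scripts are run):
>>> polysyn(['sun.mod'], (6160., 6164.))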
"""
#synspec does not currently run in parallel
nthreads = 1
#when vmicro, vrot, fwhm or nitrogen are not iterables, we create ones, otherwise we copy them
try:
nvmicro = len(vmicro)
vmicros = vmicro
except TypeError:
nvmicro = 1
vmicros = [ vmicro ]
try:
nvrot = len(vrot)
vrots = vrot
except TypeError:
nvrot = 1
vrots = [ vrot ]
try:
nfwhm = len(fwhm)
fwhms = fwhm
except TypeError:
nfwhm = 1
fwhms = [ fwhm ]
try:
nnfe = len(nfe)
nfes = nfe
except TypeError:
nnfe = 1
nfes = [ nfe ]
idir = 0
for entry in modelfiles:
for vmicro1 in vmicros:
for nfe1 in nfes:
idir = idir + 1
dir = ( "hyd%07d" % (idir) )
try:
os.mkdir(dir)
except OSError:
print( "cannot create dir hyd%07d" % (idir) )
try:
os.chdir(dir)
except OSError:
print( "cannot change dir to hyd%07d" % (idir) )
if entry == 'missing':
pass
else:
#setup the slurm script
sfile = dir+".job"
now=time.strftime("%c")
s = open(sfile ,"w")
s.write("#!/bin/bash \n")
s.write("#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# \n")
s.write("#This script was written by synple on "+now+" \n")
s.write("#SBATCH -J "+dir+" \n")
s.write("#SBATCH -o "+dir+"_%j.out"+" \n")
s.write("#SBATCH -e "+dir+"_%j.err"+" \n")
s.write("#SBATCH -n "+str(nthreads)+" \n")
s.write("#SBATCH -t 04:00:00"+" \n") #hh:mm:ss
s.write("#SBATCH -D "+os.path.abspath(os.curdir)+" \n")
s.write("#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# \n\n\n")
abu1 = copy.copy(abu)
#if need be, adjust nitrogen abundance according to nfe
if (abs(nfe1) > 1e-7):
if abu1 is None:
checksynspec(linelist,entry)
atmostype, teff, logg, vmicro2, abu1, nd, atmos = read_model(entry)
abu1[6] = abu1[6] * 10.**nfe1
x, y, z = syn(entry, wrange, dw=None, strength=strength, vmicro=vmicro1, \
abu=abu1, linelist=linelist, atom=atom, compute=False)
s.write(synspec+" < "+"fort.5"+"\n")
si = open("fort.55",'r')
for i in range(6): line = si.readline()
entries = line.split()
space = float(entries[5])
si.close()
iconv = 0
for vrot1 in vrots:
for fwhm1 in fwhms:
print('iconv=',iconv)
iconv = iconv + 1
inconv = ("%07dfort.5" % (iconv) )
outconv = ("'%07dfort.7'" % (iconv) )
if fwhm1> 0. or vrot1 > 0.:
f = open(inconv,'w')
f.write( ' %s %s %s \n' % ("'fort.7'", "'fort.17'", outconv) )
f.write( ' %f %f %f \n' % (vrot1, space, steprot) )
f.write( ' %f %f \n' % (fwhm1, stepfwhm) )
print('stepfwhm=',stepfwhm)
f.write( ' %f %f %i \n' % (wrange[0], wrange[1], 0) )
f.close()
s.write(rotin+" < "+inconv+"\n")
else:
s.write("cp "+" fort.7 "+outconv[1:-1]+"\n")
s.close()
os.chmod(sfile ,0o755)
try:
os.chdir('..')
except OSError:
print( "cannot exit dir hyd%07d" % (idir) )
return(None,None,None)
def polyopt(wrange=(9.e2,1.e5),dw=0.1,strength=1e-3, linelist=['gfallx3_bpo.19','kmol3_0.01_30.20'], \
tlt = (20,3.08,0.068), tlrho = (20,-14.0,0.59), \
tfeh=(1,0.0,0.0), tafe=(1,0.0,0.0), tcfe=(1,0.0,0.0), tnfe=(1,0.0,0.0), \
tofe=(1,0.0,0.0), trfe=(1,0.0,0.0), tsfe=(1,0.0,0.0), tvmicro=(1,1.0,0.0), \
zexclude=None, atom='ap18'):
"""Sets up a directory tree for computing opacity tables for TLUSTY. The table collection forms
a regular grid defined by triads in various parameters. Each triad has three values (n, llimit, step)
that define an array x = np.range(n)*step + llimit. Triads in teff (tteff) and logg
(tlogg) are mandatory. Triads in [Fe/H] (tfeh), [alpha/Fe] (tafe), [C/Fe] (tcfe),
[N/Fe] (tnfe), [O/Fe] (tofe), [r/Fe] (rfe), and [s/Fe] (sfe) are optional since
arrays with just one 0.0 are included by default.
Parameters
----------
wrange: tuple or list of two floats
initial and ending wavelengths (angstroms)
dw: float
wavelength step for the opacity calculations (angstroms)
(default 0.1)
strength: float, optional
threshold in the line-to-continuum opacity ratio for
selecting lines (default is 1e-3)
linelist: array of str
filenames of the line lists, the first one corresponds to
the atomic lines and all the following ones (optional) to
molecular lines
(default ['gfallx3_bpo.19','kmol3_0.01_30.20'] from Allende Prieto+ 2018)
atom: str
'ap18' -- generic opacities used in Allende Prieto+ 2018
'yo19' -- restricted set for NLTE calculations for APOGEE 2019 (Osorio+ 2019)
'hhm' -- continuum opacity is simplified to H and H-
(default 'ap18')
tlt: tuple
log10(T) triad (n, llimit, step) for opacity grid
(default values chosen for grid lt = np.arange(20)*0.068 + 3.08,
to cover the range in the DR16 APOGEE MARCS grids)
tlrho: tuple
log10(rho) triad (n, llimit, step) for opacity grid
(default values chosen for grid lrho = np.arange(20)*0.59 -14.0,
to cover the range in the DR16 APOGEE MARCS grids)
tfeh: tuple
[Fe/H] triad
tafe: tuple
[alpha/Fe] triad
tcfe: tuple
[C/Fe] triad
tnfe: tuple
[N/Fe] triad
tofe: tuple
[O/Fe] triad
trfe: tuple
[r/Fe] triad (r-elements abundance ratio)
tsfe: tuple
[s/Fe] triad (s-elements abundance ratio)
tvmicro: tuple
microturbulence triad (km/s)
zexclude: list
atomic numbers of the elements whose opacity is NOT to be
included in the table
(default None)
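Example
-------
Each triad expands as x = np.arange(n)*step + llimit, so tfeh=(5,-2.0,0.5)
yields [Fe/H] = -2.0, -1.5, -1.0, -0.5 and 0.0. A sketch setting up opacity
tables for that metallicity sequence:
>>> polyopt(wrange=(3000.,10000.), tfeh=(5,-2.0,0.5))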
"""
#synspec does not currently run in parallel
nthreads = 1
#expanding the triads t* into iterables
try:
nfeh = len(tfeh)
assert (nfeh == 3), 'Error: feh triad must have three elements (n, llimit, step)'
fehs = np.arange(tfeh[0])*tfeh[2] + tfeh[1]
except TypeError:
print('Error: feh triad must have three elements (n, llimit, step)')
return ()
try:
nafe = len(tafe)
assert (nafe == 3), 'Error: afe triad must have three elements (n, llimit, step)'
afes = np.arange(tafe[0])*tafe[2] + tafe[1]
except TypeError:
print('Error: afe triad must have three elements (n, llimit, step)')
return ()
try:
ncfe = len(tcfe)
assert (ncfe == 3), 'Error: cfe triad must have three elements (n, llimit, step)'
cfes = np.arange(tcfe[0])*tcfe[2] + tcfe[1]
except TypeError:
print('Error: cfe triad must have three elements (n, llimit, step)')
return ()
try:
nnfe = len(tnfe)
assert (nnfe == 3), 'Error: nfe triad must have three elements (n, llimit, step)'
nfes = np.arange(tnfe[0])*tnfe[2] + tnfe[1]
except TypeError:
print('Error: nfe triad must have three elements (n, llimit, step)')
return ()
try:
nofe = len(tofe)
assert (nofe == 3), 'Error: ofe triad must have three elements (n, llimit, step)'
ofes = np.arange(tofe[0])*tofe[2] + tofe[1]
except TypeError:
print('Error: ofe triad must have three elements (n, llimit, step)')
return ()
try:
nrfe = len(trfe)
assert (nrfe == 3), 'Error: rfe triad must have three elements (n, llimit, step)'
rfes = np.arange(trfe[0])*trfe[2] + trfe[1]
except TypeError:
print('Error: rfe triad must have three elements (n, llimit, step)')
return ()
try:
nsfe = len(tsfe)
assert (nsfe == 3), 'Error: sfe triad must have three elements (n, llimit, step)'
sfes = np.arange(tsfe[0])*tsfe[2] + tsfe[1]
except TypeError:
print('Error: sfe triad must have three elements (n, llimit, step)')
return ()
try:
nvmicro = len(tvmicro)
assert (nvmicro == 3), 'Error: vmicro triad must have three elements (n, llimit, step)'
vmicros = np.arange(tvmicro[0])*tvmicro[2] + tvmicro[1]
except TypeError:
print('Error: vmicro triad must have three elements (n, llimit, step)')
return ()
#ranges for the opacity table
try:
nlt = len(tlt)
assert (nlt == 3), 'Error: lt triad must have three elements (n, llimit, step)'
lt = np.arange(tlt[0])*tlt[2] + tlt[1] #log10(T)
except TypeError:
print('Error: tlt triad must have three elements (n, llimit, step)')
return ()
try:
nlrho = len(tlrho)
assert (nlrho == 3), 'Error: lrho triad must have three elements (n, llimit, step)'
lrho = np.arange(tlrho[0])*tlrho[2] + tlrho[1] #log10(density)
except TypeError:
print('Error: tlrho triad must have three elements (n, llimit, step)')
return ()
symbol, mass, sol = elements()
z_metals = np.arange(97,dtype=int) + 3
#Ar usually included among alphas in MARCS and not in Kurucz/Meszaros
z_alphas = np.array([8,10,12,14,16,18,20,22],dtype=int)
# rs increases: notes and data below from comments in the MARCS code (provided by B.Edvardsson)
# Fractional r-process abundance for Ga-Bi (r+s simply assumed == 100%) | Date 2000-01-18
# (Note: Ga-Sr (31-38) was just copied from Kaeppeler et al. 1989, below)
# s-process from Stellar models: Arlandini C., Kaeppeler F., Wisshak K.,
# Gallino R., Busso M., Straniero O., 1999, Astrophys J. 525, 886-900
# Fractions corrected to the revised meteoritic abundances
# of Grevesse N., Sauval A.J. 1998, Space Science Review 85, 161-174
# -0.99 is assigned to unstable elements
z_rs = np.arange(62,dtype=int) + 31
rfrac= np.array([.43, .47, .81, .85, .39, .47,
.41, .11, .08, .17, .15, .50,-.99, .68, .86,
.54, .80, .48, .65, .35, .75, .83, .80, .80,
.85, .19, .38, .23, .51, .44,-.99, .71, .93,
.85, .93, .85, .92, .83, .87, .67, .80, .44,
.59, .44, .91, .91, .99, .95, .94, .41, .24,
.54, .95,-.99,-.99,-.99,-.99,-.99,-.99, 1.0,
-.99, 1.0], dtype=float)
idir = 0
for feh in fehs:
for afe in afes:
for cfe in cfes:
for nfe in nfes:
for ofe in ofes:
for rfe in rfes:
for sfe in sfes:
for vmicro in vmicros:
print(feh,afe,cfe,nfe,ofe,rfe,sfe)
idir = idir + 1
dir = ( "hyd%07d" % (idir) )
try:
os.mkdir(dir)
except OSError:
print( "cannot create dir hyd%07d" % (idir) )
try:
os.chdir(dir)
except OSError:
print( "cannot change dir to hyd%07d" % (idir) )
#check input parameters are valid
imode = checkinput(wrange, vmicro, linelist)
#setup the slurm script
sfile = dir+".job"
now=time.strftime("%c")
s = open(sfile ,"w")
s.write("#!/bin/bash \n")
s.write("#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# \n")
s.write("#This script was written by synple on "+now+" \n")
s.write("#SBATCH -J "+dir+" \n")
s.write("#SBATCH -o "+dir+"_%j.out"+" \n")
s.write("#SBATCH -e "+dir+"_%j.err"+" \n")
s.write("#SBATCH -n "+str(nthreads)+" \n")
s.write("#SBATCH --ntasks-per-node "+str(4)+" \n")
s.write("#SBATCH -t 48:00:00"+" \n") #hh:mm:ss
s.write("#SBATCH -D "+os.path.abspath(os.curdir)+" \n")
s.write("#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-# \n\n\n")
abu = copy.copy(sol)
if (abs(feh) > 1e-7):
for i in range(len(z_metals)):
abu[z_metals[i] - 1] = abu[z_metals[i] - 1] * 10.**feh
if (abs(afe) > 1e-7):
for i in range(len(z_alphas)):
abu[z_alphas[i] - 1] = abu[z_alphas[i] - 1] * 10.**afe
if (abs(cfe) > 1e-7): abu[5] = abu[5] * 10.**cfe
if (abs(nfe) > 1e-7): abu[6] = abu[6] * 10.**nfe
if (abs(ofe) > 1e-7): abu[7] = abu[7] * 10.**ofe
if (abs(rfe) > 1e-7):
for i in range(len(z_rs)):
if rfrac[i] > 0.0: abu[z_rs[i] - 1] = abu[z_rs[i] - 1] * rfrac[i] * 10.**rfe
if (abs(sfe) > 1e-7):
for i in range(len(z_rs)):
if rfrac[i] > 0.0: abu[z_rs[i] - 1] = abu[z_rs[i] - 1] * (1.0 - rfrac[i]) * 10.**sfe
write55(wrange,dw=dw,imode=-3,hydprf=0, strength=strength, vmicro=vmicro, linelist=linelist)
write5(9999.,9.9,abu,atom)
writetas('tas',1,linelist)
write2(lt,lrho,wrange,filename='opt.dat', \
strength=strength,inttab=1)
if zexclude is not None:
write3(zexclude)
create_links(linelist)
s.write('time ' + synspec + " < "+"fort.5"+"\n")
s.close()
os.chmod(sfile ,0o755)
try:
os.chdir('..')
except OSError:
print( "cannot exit dir hyd%07d" % (idir) )
return()
def collect_marcs(modeldir=modeldir, tteff=None, tlogg=None, tfeh=(1,0.0,0.0), tafe=(1,0.0,0.0), \
tcfe=(1,0.0,0.0), tnfe=(1,0.0,0.0), tofe=(1,0.0,0.0), trfe=(1,0.0,0.0), tsfe=(1,0.0,0.0), \
ignore_missing_models=False):
"""Collects all the MARCS models in modeldir that are part of a regular grid defined
by triads in various parameters. Each triad has three values (n, llimit, step)
that define an array x = np.arange(n)*step + llimit. Triads in teff (tteff) and logg
(tlogg) are mandatory. Triads in [Fe/H] (tfeh), [alpha/Fe] (tafe), [C/Fe] (tcfe),
[N/Fe] (tnfe), [O/Fe] (tofe), [r/Fe] (trfe), and [s/Fe] (tsfe) are optional since
arrays with just one 0.0 are included by default.
Parameters
----------
modeldir: str
directory where model atmosphere files are
tteff: tuple
Teff triad (n, llimit, step)
tlogg: tuple
logg triad (n, llimit, step)
tfeh: tuple
[Fe/H] triad
tafe: tuple
[alpha/Fe] triad
tcfe: tuple
[C/Fe] triad
tnfe: tuple
[N/Fe] triad
tofe: tuple
[O/Fe] triad
trfe: tuple
[r/Fe] triad (r-elements abundance ratio)
tsfe: tuple
[s/Fe] triad (s-elements abundance ratio)
ignore_missing_models: bool
set to True to avoid stopping when a model is missing,
in which case the string 'missing' is entered in the returned list
Returns
-------
files: list of str
file names with MARCS models that are in modeldir and match
the parameters in the requested grid
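Example
-------
A sketch collecting a small grid; tteff=(3,5000.,250.) expands to
Teff = 5000, 5250 and 5500 K, and tlogg=(2,4.0,0.5) to logg = 4.0 and 4.5:
>>> files = collect_marcs(tteff=(3,5000.,250.), tlogg=(2,4.0,0.5), ignore_missing_models=True)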
"""
#expanding the triads t* into iterables
try:
nteff = len(tteff)
assert (nteff == 3), 'Error: Teff triad must have three elements (n, llimit, step)'
teffs = np.arange(tteff[0])*tteff[2] + tteff[1]
except TypeError:
print('Error: Teff triad must have three elements (n, llimit, step)')
return ()
try:
nlogg = len(tlogg)
assert (nlogg == 3), 'Error: logg triad must have three elements (n, llimit, step)'
loggs = np.arange(tlogg[0])*tlogg[2] + tlogg[1]
except TypeError:
print('Error: logg triad must have three elements (n, llimit, step)')
return ()
try:
nfeh = len(tfeh)
assert (nfeh == 3), 'Error: feh triad must have three elements (n, llimit, step)'
fehs = np.arange(tfeh[0])*tfeh[2] + tfeh[1]
except TypeError:
print('Error: feh triad must have three elements (n, llimit, step)')
return ()
try:
nafe = len(tafe)
assert (nafe == 3), 'Error: afe triad must have three elements (n, llimit, step)'
afes = np.arange(tafe[0])*tafe[2] + tafe[1]
except TypeError:
print('Error: afe triad must have three elements (n, llimit, step)')
return ()
try:
ncfe = len(tcfe)
assert (ncfe == 3), 'Error: cfe triad must have three elements (n, llimit, step)'
cfes = np.arange(tcfe[0])*tcfe[2] + tcfe[1]
except TypeError:
print('Error: cfe triad must have three elements (n, llimit, step)')
return ()
try:
nnfe = len(tnfe)
assert (nnfe == 3), 'Error: nfe triad must have three elements (n, llimit, step)'
nfes = np.arange(tnfe[0])*tnfe[2] + tnfe[1]
except TypeError:
print('Error: nfe triad must have three elements (n, llimit, step)')
return ()
try:
nofe = len(tofe)
assert (nofe == 3), 'Error: ofe triad must have three elements (n, llimit, step)'
ofes = np.arange(tofe[0])*tofe[2] + tofe[1]
except TypeError:
print('Error: ofe triad must have three elements (n, llimit, step)')
return ()
try:
nrfe = len(trfe)
assert (nrfe == 3), 'Error: rfe triad must have three elements (n, llimit, step)'
rfes = np.arange(trfe[0])*trfe[2] + trfe[1]
except TypeError:
print('Error: rfe triad must have three elements (n, llimit, step)')
return ()
try:
nsfe = len(tsfe)
assert (nsfe == 3), 'Error: sfe triad must have three elements (n, llimit, step)'
sfes = np.arange(tsfe[0])*tsfe[2] + tsfe[1]
except TypeError:
print('Error: sfe triad must have three elements (n, llimit, step)')
return ()
files = []
fi = open('files.txt','w')
for teff in teffs:
for logg in loggs:
for feh in fehs:
for afe in afes:
for cfe in cfes:
for nfe in nfes:
for ofe in ofes:
for rfe in rfes:
for sfe in sfes:
print(teff,logg,feh,afe,cfe,nfe,ofe,rfe,sfe)
code = 'm*_t*_x3'
if logg >= 3.5:
a1 = 'p'
else:
a1 = 's'
filename = ("%s%4i_g%+.1f_%s_z%+.2f_a%+.2f_c%+.2f_n%+.2f_o%+.2f_r%+.2f_s%+.2f.mod*" % (a1,teff,logg,code,feh,afe,cfe,nfe,ofe,rfe,sfe) )
file = glob.glob(os.path.join(modeldir,filename))
if not ignore_missing_models:
assert len(file) > 0, 'Cannot find model '+filename+' in modeldir '+modeldir
assert len(file) == 1, 'More than one model matches '+filename+' in modeldir '+modeldir
else:
if (len(file) == 0): files.append('missing')
if (len(file) == 1): files.append(file[0])
fi.write( "%s %4i %+.1f %s %+.2f %+.2f %+.2f %+.2f %+.2f %+.2f %+.2f\n" % (files[-1],teff,logg,feh,afe,cfe,nfe,ofe,rfe,sfe) )
fi.close()
return(files)
def collect_k2odfnew(modeldir=modeldir, tteff=None, tlogg=None, tfeh=(1,0.0,0.0), tafe=(1,0.0,0.0), \
ignore_missing_models=False):
"""Collects all the ODFNEW Castelli/Kurucz models in modeldir that are part of a regular grid defined
by triads in various parameters. Each triad has three values (n, llimit, step)
that define an array x = np.arange(n)*step + llimit. Triads in teff (tteff) and logg
(tlogg) are mandatory. Triads in [Fe/H] (tfeh), and [alpha/Fe] (tafe) are optional since
arrays with just one 0.0 are included by default.
NOTE: There are ODFNEW models with only afe=[alpha/Fe]=0.0 or 0.4. The latter are used whenever
afe takes values > 0.0, while the afe=0.0 models are used otherwise.
Parameters
----------
modeldir: str
directory where model atmosphere files are
tteff: tuple
Teff triad (n, llimit, step)
tlogg: tuple
logg triad (n, llimit, step)
tfeh: tuple
[Fe/H] triad
tafe: tuple
[alpha/Fe] triad
ignore_missing_models: bool
set to True to avoid stopping when a model is missing,
in which case the string 'missing' is entered in the returned list
Returns
-------
files: list of str
file names with Kurucz ODFNEW models that are in modeldir and match
the parameters in the requested grid
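Example
-------
A sketch analogous to the collect_marcs example; missing models are entered
as 'missing' instead of stopping the search:
>>> files = collect_k2odfnew(tteff=(3,5000.,250.), tlogg=(2,4.0,0.5), ignore_missing_models=True)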
"""
#expanding the triads t* into iterables
try:
nteff = len(tteff)
assert (nteff == 3), 'Error: Teff triad must have three elements (n, llimit, step)'
teffs = np.arange(tteff[0])*tteff[2] + tteff[1]
except TypeError:
print('Error: Teff triad must have three elements (n, llimit, step)')
return ()
try:
nlogg = len(tlogg)
assert (nlogg == 3), 'Error: logg triad must have three elements (n, llimit, step)'
loggs = np.arange(tlogg[0])*tlogg[2] + tlogg[1]
except TypeError:
print('Error: logg triad must have three elements (n, llimit, step)')
return ()
try:
nfeh = len(tfeh)
assert (nfeh == 3), 'Error: feh triad must have three elements (n, llimit, step)'
fehs = np.arange(tfeh[0])*tfeh[2] + tfeh[1]
except TypeError:
print('Error: feh triad must have three elements (n, llimit, step)')
return ()
try:
nafe = len(tafe)
assert (nafe == 3), 'Error: afe triad must have three elements (n, llimit, step)'
afes = np.arange(tafe[0])*tafe[2] + tafe[1]
except TypeError:
print('Error: afe triad must have three elements (n, llimit, step)')
return ()
files = []
fi = open('files.txt','w')
for teff in teffs:
for logg in loggs:
for feh in fehs:
for afe in afes:
print(teff,logg,feh,afe)
code = 'k2odfnew.dat'
if afe > 0.0:
a1 = 'a'
else:
a1 = ''
if feh < 0.0:
a2 = 'am'
else:
a2 = 'ap'
filename = ("t%05ig%.1f%s%02i%s" % (teff,logg,a2,int(abs(feh)*10),a1+code) )
file = glob.glob(os.path.join(modeldir,filename))
if not ignore_missing_models:
assert len(file) > 0, 'Cannot find model '+filename+' in modeldir '+modeldir
assert len(file) == 1, 'More than one model matches '+filename+' in modeldir '+modeldir
else:
if (len(file) == 0): files.append('missing')
if (len(file) == 1): files.append(file[0])
fi.write( "%s %4i %+.1f %+.2f %+.2f \n" % (files[-1],teff,logg,feh,afe) )
fi.close()
return(files)
def getallt(modelfiles):
"""Collects all the values for temperature, density and electron number density
in a list of files with model atmospheres
Parameters
----------
modelfiles : list of str
files with model atmospheres
Returns
-------
t: list
list of all temperatures in all the layers of the input model atmospheres
rho: list
list of all values of density in all the layers of the input model atmospheres
ne: list
list of all values of electron number density in all the layers of the input model atmospheres
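Example
-------
A sketch gathering the temperature/density/electron-density sampling of a
(hypothetical) list of MARCS models:
>>> t, rho, ne = getallt(['sun.mod', 'star2.mod'])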
"""
t = []
rho = []
ne = []
for entry in modelfiles:
print('reading ',entry)
teff, logg, vmicro, abu, nd, atmos = read_marcs_model2(entry)
#atmostype,teff,logg,vmicro,abu,nd,atmos = read_model(entry)
for value in atmos['t']: t.append(value)
for value in atmos['rho']: rho.append(value)
for value in atmos['ne']: ne.append(value)
return(t,rho,ne)
def call_rotin(wave=None, flux=None, vrot=0.0, fwhm=0.0, space=1e-2, steprot=0.0, stepfwhm=0.0, clean=True, reuseinputfiles=False):
"""Convolves a synthetic spectrum with a rotation and/or Gaussian kernel
Interface to the fortran code rotin.
Parameters
----------
wave: numpy array of floats
wavelengths (angstroms)
flux: numpy array of floats
flux
vrot: float
projected rotational velocity (km/s)
(default 0.)
space: float, optional
characteristic wavelength scale for variations in the spectrum (angstroms)
(default is 1e-2)
steprot: float
wavelength step for convolution with rotational kernel (angstroms)
set to 0. for automatic adjustment (default 0.)
fwhm: float
Gaussian broadening: macroturbulence, instrumental, etc. (angstroms)
(default 0.)
stepfwhm: float
wavelength step for Gaussian convolution (angstroms)
set to 0. for automatic adjustment (default 0.)
clean: bool
set to False to avoid the removal of the rotin
temporary files (default True)
reuseinputfiles: bool
set to True to take the input data from the synspec output files (fort.7 and fort.17)
rather than from the input arrays (wave, flux)
Returns
-------
wave2: numpy array of floats
wavelengths (angstroms)
flux2: numpy array of floats
flux
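Example
-------
A sketch convolving a previously computed spectrum with a 0.2 angstrom FWHM
Gaussian (assumes sun.mod is available as in the syn example):
>>> wave, flux, cont = syn('sun.mod', (6160., 6164.))
>>> wave2, flux2 = call_rotin(wave, flux, vrot=0.0, fwhm=0.2)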
"""
if not reuseinputfiles:
f = open('fort.7','w')
f2 = open('fort.17','w')
maxflux = np.max(flux)
for i in range(len(wave)):
f.write( ' %f %f \n' % (wave[i], flux[i]) )
f2.write( ' %f %f \n' % (wave[i], maxflux) )
f.close()
f2.close()
f = open('fort.5','w')
f.write( ' %s %s %s \n' % ("'fort.7'", "'fort.17'", "'fort.11'") )
f.write( ' %f %f %f \n' % (vrot, space, steprot) )
f.write( ' %f %f \n' % (fwhm, stepfwhm) )
print('stepfwhm=',stepfwhm)
f.write( ' %f %f %i \n' % (np.min(wave), np.max(wave), 0) )
f.close()
synin = open('fort.5')
synout = open('syn.log','a')
p = subprocess.Popen([rotin], stdin=synin, stdout = synout, stderr = synout)
p.wait()
synout.flush()
synout.close()
synin.close()
assert (os.path.isfile('fort.11')), 'Error: I cannot read the file *fort.11* in '+os.getcwd()+' -- looks like rotin has crashed, please look at syn.log'
wave2, flux2 = np.loadtxt('fort.11', unpack=True)
print(len(wave),len(wave2))
if clean: cleanup()
return(wave2, flux2)
def read_model(modelfile):
  """Reads a model atmosphere into a structure
  Minimal reconstruction of the dispatching reader: it identifies the type of
  model atmosphere and calls the corresponding reader (a read_phoenix_model
  with the same interface as the kurucz/marcs readers is assumed)
  """
  atmostype = identify_atmostype(modelfile)
  if atmostype == 'kurucz':
    teff, logg, vmicro, abu, nd, atmos = read_kurucz_model(modelfile)
  elif atmostype == 'marcs':
    teff, logg, vmicro, abu, nd, atmos = read_marcs_model2(modelfile)
  else:
    #assumed: a phoenix reader analogous to the kurucz/marcs ones
    teff, logg, vmicro, abu, nd, atmos = read_phoenix_model(modelfile)
  return (atmostype, teff, logg, vmicro, abu, nd, atmos)
def identify_atmostype(modelfile):
"""Idenfies the type of model atmosphere in an input file
Valid options are kurucz, marcs or phoenix
Parameters
----------
modelfile: str
file with a model atmosphere
Returns
-------
atmostype: str
can take the value 'kurucz', 'marcs' or 'phoenix' ('tlusty' soon to be added!)
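Example
-------
A sketch (the file name is hypothetical):
>>> atmostype = identify_atmostype('sun.mod')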
"""
if ('PHOENIX' in modelfile and 'fits' in modelfile): atmostype = 'phoenix'
else:
if modelfile[-3:] == '.gz':
f = gzip.open(modelfile,'rt')
else:
f = open(modelfile,'r')
line = f.readline()
print('modelfile / line=',modelfile,line)
if ('TEFF' in line): atmostype = 'kurucz'
else: atmostype = 'marcs'
f.close()
return(atmostype)
def checksynspec(linelist,modelfile):
"""checking that executables and data are where it should be
Parameters
----------
linelist: array of str
file names of the line lists to be used. The first string should correspond
to the atomic line list and is mandatory. The remainder are optional and
correspond to molecular line lists. All files should be in synspec format.
(see documentation at http://nova.astro.umd.edu/Synspec43/synspec.html)
modelfile: str
file with a model atmosphere; when not found as given, it is also
searched for in modeldir
"""
dirs = [synpledir,modelatomdir,linelistdir,bindir]
for entry in dirs: assert (os.path.isdir(entry)), 'dir '+entry+' missing'
files = [synspec,rotin]
for entry in linelist:
if not os.path.isfile(entry):
ll = os.path.join(linelistdir,entry)
if os.path.isfile(ll): files.append(ll)
for entry in files: assert (os.path.isfile(entry)), 'file '+entry+' missing'
if not os.path.isfile(modelfile):
mf = os.path.join(modeldir,modelfile)
if os.path.isfile(mf): modelfile = mf
print(modeldir)
print(modelfile)
assert (os.path.isfile(modelfile)),'model atmosphere file '+modelfile+' missing'
return(True)
def checkinput(wrange, vmicro, linelist):
"""checking input parameters from user
Parameters
----------
wrange: tuple or list of two floats
initial and ending wavelengths (angstroms)
vmicro: float, optional
microturbulence (km/s)
(default is taken from the model atmosphere)
linelist: array of str
filenames of the line lists, the first one corresponds to
the atomic lines and all the following ones (optional) to
molecular lines
(default ['gfallx3_bpo.19','kmol3_0.01_30.20'] from Allende Prieto+ 2018)
Returns
-------
imode: int
appropriate value for the variable imode, which specifies whether
one will use many atomic lines (imode=0), just a few (imode=1),
or none (H lines are an exception; imode=2)
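Example
-------
A sketch, assuming the default line lists are installed in linelistdir:
>>> imode = checkinput((6160., 6164.), 1.0, ['gfallx3_bpo.19', 'kmol3_0.01_30.20'])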
"""
#determine imode
# imode = 0 is default, atoms and molecules, at least 2 line lists
# synple sets IFMOL = 1 in 'tas' when an input molecular line list is used
# but does not set it when only an atomic line list is given
# imode = 2 for pure continuum
# imode = 1 for few-lines mode
# imode = -3 for regular opacity tables (TLUSTY)
if len(linelist) == 0:
imode = 2 # no atomic or molecular line list -> pure continuum and no molecules
else:
#find range of atomic line list
if not os.path.isfile(linelist[0]):
ll = os.path.join(linelistdir,linelist[0])
if os.path.isfile(ll): linelist[0] = ll
nlines, minlambda, maxlambda = getlinelistrange(linelist[0])
#check
if nlines > 10:
assert (wrange[0] > minlambda-1 and wrange[1] < maxlambda+1),'wrange exceeds the allowed range ('+str(minlambda)+' to '+str(maxlambda)+')'
imode = 0
else:
imode = 1
assert (vmicro >= 0.0),'vmicro = '+str(vmicro)+' but it cannot be < 0.'
return(imode)
def getlinelistrange(atomiclinelist):
#finds out min and max wavelengths for a line list
f = open(atomiclinelist,'r')
line = f.readline()
entries = line.split()
minlambda = float(entries[0])*10.
fsize = os.path.getsize(atomiclinelist)
f.seek(fsize-103)
line = f.readline()
f.close()
entries = line.split()
maxlambda = float(entries[0])*10.
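#the number of lines is estimated from the file size, assuming ~100 bytes per line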
nlines = int(0.01 * fsize)
return(nlines, minlambda,maxlambda)
def writetas(filename,nd,linelist):
#write non-std input parameters
# input: filename -- str -- name of the non-std. param. file to print
# nd -- int -- number of layers in the model
# linelist -- list -- names of the linelist files (atomic first, then one
# or more molecular ones)
f = open(filename,'w')
f.write("ND= "+str(nd)+" \n")
if len(linelist) > 1: f.write("IFMOL= "+one+" \n")
f.write("TMOLIM= 8000. \n")
f.close()
return()
def write3(zexclude):
f = open('fort.3','w')
for z in zexclude:
f.write( " %d %10.4e \n" % (z, 0.0) )
f.close()
return()
def write2(lt,lrho,wrange, filename='opt.data', dlw=2e-5, binary=False,strength=1e-4,inttab=1):
#write fort.2 file for creating opacity tables for TLUSTY
f = open('fort.2','w')
f.write( " %d %10.4e %10.4e \n" % (len(lt),10.**lt[0],10.**lt[-1]) )
f.write( " %d \n" % (1) )
f.write( " %d %10.4e %10.4e \n" % (len(lrho),10.**lrho[0],10.**lrho[-1]) )
nsamples = int( (np.log10(wrange[1]) - np.log10(wrange[0]) )/dlw) + 1
f.write( " %d %d %10.4e %10.4e \n" % (nsamples,inttab,wrange[0],wrange[1]) )
if binary:
ibingr = 1
else:
ibingr = 0
filename = "'"+filename+"'"
f.write( " %s %d \n" % (filename,ibingr) )
f.close()
return()
def write55(wrange,dw=1e-2,imode=0,hydprf=2,strength=1e-4,vmicro=0.0, \
linelist=['gfallx3_bpo.19','kmol3_0.01_30.20'], atmostype='kurucz'):
#imode,idst,iprin
#inmod,zero,ichang,ichemc
#lyman,zero,zero,zero,zero
#one,nlte,icontl,zero,ifhe2
#ihydpr,ihe1pr,ihe2pr
#wstart,wend,cutoff,zero,strength,wdist
if (atmostype == 'tlusty' or atmostype == 'marcs'): inmod = 1
else: inmod = 0
f = open('fort.55','w')
f.write(" "+str(imode)+" "+2*zero+"\n")
f.write(" "+str(inmod)+3*zero+"\n")
f.write(5*zero+"\n")
f.write(one+4*zero+"\n")
f.write(str(hydprf)+2*zero+"\n")
if imode == -3:
f.write( ' %f %f %f %i %e %f \n ' % (wrange[0], -wrange[1], 100., 2000, strength, dw) )
else:
f.write( ' %f %f %f %i %e %f \n ' % (wrange[0], wrange[1], 200., 2000, strength, dw) )
ll = len(linelist)
if ll < 2: f.write(2*zero)
else: f.write(str(ll-1) + ' ' + ' '.join(map(str,np.arange(ll-1)+20)))
f.write("\n")
f.write( ' %f \n' % (vmicro) )
f.close()
def write5(teff,logg,abu, atom='ap18', ofile='fort.5', nlte=False, tl=False):
symbol, mass, sol = elements()
f = open(ofile,'w')
f.write(' '+str(teff)+" "+('%7.4f' % logg)+" ! TEFF, GRAV \n")
if nlte:
f.write(" F F ! LTE, GRAY \n")
else:
f.write(" T F ! LTE, GRAY \n")
f.write(" 'tas' ! name of non-standard flags \n")
f.write(" 50 ! frequencies \n")
if tl:
natom = 30
else:
natom = len(abu)
f.write(" "+str(natom)+" ! NATOMS \n")
assert (atom == 'hhm' or atom == 'ap18' or atom == 'yo19'), 'atom must be one of: hhm/ap18/yo19!'
ex = np.ones(natom, dtype=int)
if atom == 'hhm' :
zex = [1] #atomic numbers of elements included explicitly (contributing cont. opacity)
elif atom == 'yo19':
zex = [1,11,12,19,20]
elif atom == 'ap18':
zex = [1,2,6,7,8,11,12,13,14,20,26]
for i in zex: ex[i-1] = 2
if nlte: ex[0] = -3
for i in range(natom):
f.write(' %2d %e %i %s\n' % (ex[i], abu[i], 0, ' ! ' +symbol[i]) )
for i in range(3): f.write("* \n")
if atom == 'hhm': # highly simplified continuum opacities -- just H and H-
f.write(" 1 -1 1 0 0 1 ' H 1' 'data/hm.dat' \n" )
f.write(" 0 0 3 0 \n")
f.write(" 1 0 9 0 0 0 ' H 1' 'data/h1s.dat' \n")
f.write(" 1 1 1 1 0 0 ' H 2' ' ' \n")
f.write(" 0 0 0 -1 0 0 ' ' ' ' \n")
elif atom == "yo19": # set for NLTE calculations for APOGEE (see Osorio+ 2019 A&A paper)
f.write("* ../data_atom for ions \n")
f.write(" 1 -1 1 0 0 1 ' H 0' 'data_atom/hm.dat' \n")
f.write(" 0 0 3 0 \n")
f.write(" 1 0 16 0 0 0 ' H 1' 'data_atom/h1_16lev2.dat' \n")
f.write(" 1 1 1 1 0 0 ' H 2' ' ' \n")
f.write(" 11 0 42 0 0 0 'Na 1' 'data_atom/NaIkas.tl' \n")
f.write(" 11 1 1 1 0 0 'Na 2' '' \n")
f.write(" 12 0 96 0 0 0 'Mg 1' 'data_atom/Mg1kas_F_ccc.tl' \n")
f.write(" 12 1 29 0 0 0 'Mg 2' 'data_atom/Mg2kas_F_ccc.tl' \n")
f.write(" 12 2 1 1 0 0 'Mg 3' ' ' \n")
f.write(" 19 0 31 0 0 0 'K 1' 'data_atom/KIkas.tl' \n")
f.write(" 19 1 1 1 0 0 'K 2' '' \n")
f.write(" 20 0 66 0 0 0 'Ca 1' 'data_atom/Ca1kas_F_zat.tl' \n")
f.write(" 20 1 24 0 0 0 'Ca 2' 'data_atom/Ca2kas_F_zat.tl' \n")
f.write(" 20 2 1 1 0 0 'Ca 3' ' ' \n")
f.write(" 0 0 0 -1 0 0 ' ' ' ' \n")
elif atom == 'ap18': # generic set used in Allende Prieto+ (2018) A&A paper
f.write("* ../data for ions \n")
f.write(" 1 -1 1 0 0 1 ' H 1' 'data/hm.dat' \n")
f.write(" 0 0 3 0 \n")
f.write(" 1 0 9 0 0 0 ' H 1' 'data/h1s.dat' \n")
f.write(" 1 1 1 1 0 0 ' H 2' ' ' \n")
f.write(" 2 0 14 0 0 0 'He 1' 'data/he1.dat' \n")
f.write(" 2 1 14 0 0 0 'He 2' 'data/he2.dat ' \n")
f.write(" 2 2 1 1 0 0 'He 3' ' ' \n")
f.write(" 6 0 104 0 0 0 ' C 1' 'data/c1.t' \n")
f.write(" 6 1 40 0 0 0 ' C 2' 'data/c2.t' \n")
f.write(" 6 2 1 1 0 0 ' C 3' ' ' \n")
f.write(" 7 0 89 0 0 0 ' N 1' 'data/n1.t' \n")
f.write(" 7 1 51 0 0 0 ' N 2' 'data/n2.t' \n")
f.write(" 7 2 1 1 0 0 ' N 3' ' ' \n")
f.write(" 8 0 54 0 0 0 ' O 1' 'data/o1.t' \n")
f.write(" 8 1 74 0 0 0 ' O 2' 'data/o2.t' \n")
f.write(" 8 2 1 1 0 0 ' O 3' ' ' \n")
f.write(" 11 0 32 0 0 0 'Na 1' 'data/na1.t' \n")
f.write(" 11 1 8 0 0 0 'Na 2' 'data/na2.t' \n")
f.write(" 11 2 1 1 0 0 'Na 3' ' ' \n")
f.write(" 12 0 71 0 0 0 'Mg 1' 'data/mg1.t' \n")
f.write(" 12 1 31 0 0 0 'Mg 2' 'data/mg2.t' \n")
f.write(" 12 2 1 1 0 0 'Mg 3' ' ' \n")
f.write(" 13 0 33 0 0 0 'Al 1' 'data/al1.t' \n")
f.write(" 13 1 81 0 0 0 'Al 2' 'data/al2.t' \n")
f.write(" 13 2 1 1 0 0 'Al 3' ' ' \n")
f.write(" 14 0 57 0 0 0 'Si 1' 'data/si1.t' \n")
f.write(" 14 1 46 0 0 0 'Si 2' 'data/si2.t' \n")
f.write(" 14 2 1 1 0 0 'Si 3' ' ' \n")
f.write(" 20 0 79 0 0 0 'Ca 1' 'data/ca1.t' \n")
f.write(" 20 1 32 0 0 0 'Ca 2' 'data/ca2.t' \n")
f.write(" 20 2 1 1 0 0 'Ca 3' ' ' \n")
f.write(" 26 0 49 0 0 0 'Fe 1' 'data/tlusty_fe1_topmod.dat' \n")
f.write(" 26 1 41 0 0 0 'Fe 2' 'data/tlusty_fe2_topmod.dat' \n")
f.write(" 26 2 1 1 0 0 'Fe 3' ' ' \n")
f.write(" 0 0 0 -1 0 0 ' ' ' ' \n")
f.write("* \n")
f.write("* end \n")
f.close()
def write8(teff, logg, nd, atmos, atmostype, ofile='fort.8'):
f = open(ofile,'w')
if atmostype == 'tlusty':
f.write(" "+str(nd)+" "+str(3)+"\n")
for i in range(nd):
f.write(' %e ' % atmos['dm'][i])
f.write("\n")
for i in range(nd):
f.write( '%f %e %e \n' % (atmos['t'][i], atmos['ne'][i], atmos['rho'][i] ) )
f.close()
else:
if atmostype == 'marcs':
f.write(" "+str(nd)+" "+str(-4)+"\n")
for i in range(nd):
f.write(' %e ' % atmos['dm'][i])
f.write("\n")
for i in range(nd):
f.write( '%f %e %e %e \n' % (atmos['t'][i], atmos['ne'][i], atmos['rho'][i], atmos['rho'][i]/atmos['mmw'][i]/1.67333e-24 + atmos['ne'][i] ) )
f.close()
else:
f.write( 'TEFF %7.0f GRAVITY %7.5f LTE \n' % (teff, logg) )
for i in range(21): f.write('\n')
f.write( 'READ DECK6%3i RHOX,T,P,XNE \n' % nd )
for i in range(nd):
f.write( '%e %f %e %e \n' % (atmos['dm'][i], atmos['t'][i], atmos['p'][i], atmos['ne'][i]) )
f.close()
return()
def create_links(linelist):
#create soft links for line lists and model atom dir
for i in range(len(linelist)):
if not os.path.isfile(linelist[i]):
ll = os.path.join(linelistdir,linelist[i])
if os.path.isfile(ll): linelist[i] = ll
if i == 0: os.symlink(linelist[0],'fort.19')
else: os.symlink(linelist[i],'fort.'+str(20-1+i))
os.symlink(modelatomdir,'./data')
return()
def cleanup():
#cleanup all temporary files
files = os.listdir('.')
for entry in files:
if os.path.islink(entry) and entry.startswith('fort'): os.unlink(entry)
if os.path.isfile(entry) and entry.startswith('fort'): os.remove(entry)
if os.path.islink('data'): os.unlink('data')
if os.path.isfile('tas'): os.remove('tas')
assert (not os.path.isdir('data')), 'A subdirectory *data* exists in this folder, and that prevents the creation of a link to the data directory for synple'
return()
def read_kurucz_model(modelfile):
"""Reads a Kurucz model atmospheres
Parameters
----------
modelfile: str
file name
Returns
-------
teff : float
effective temperature (K)
logg : float
log10 of the surface gravity (cm s-2)
vmicro : float
microturbulence velocity (km/s)
abu : list
abundances, number densities of nuclei relative to hydrogen N(X)/N(H)
for elements Z=1,99 (H to Es)
nd: int
number of depths (layers) of the model
atmos: numpy structured array
array with the run with depth of column mass, temperature, gas pressure
and electron density
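Example
-------
A sketch (the file name is hypothetical):
>>> teff, logg, vmicro, abu, nd, atmos = read_kurucz_model('ksun.dat')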
"""
f = open(modelfile,'r')
line = f.readline()
entries = line.split()
assert (entries[0] == 'TEFF' and entries[2] == 'GRAVITY'), 'Cannot find Teff and logg in the file header'
teff = float(entries[1])
logg = float(entries[3])
while entries[0] != 'ABUNDANCE':
line = f.readline()
entries = line.split()
abu = []
if entries[1] == 'SCALE':
scale = float(entries[2])
while entries[0] == 'ABUNDANCE':
i = 0
for word in entries:
if (word == 'CHANGE'): w = i
i = i + 1
for i in range(int((len(entries)-w-1)/2)):
z = int(entries[w+1+2*i])
if (z == 1): nhntot = float(entries[w+2+2*i])
if (z < 3): abu.append(float(entries[w+2+2*i]) / nhntot)
else: abu.append(scale*10.**(float(entries[w+2+2*i])) / nhntot)
line = f.readline()
entries = line.split()
assert (entries[0] == 'READ'), 'I cannot find the header of the atmospheric table in the input Kurucz model'
nd = int(entries[2]) - 1
line = f.readline()
entries = line.split()
line = f.readline()
entries = line.split()
vmicro = float(entries[6])/1e5
dm = [ float(entries[0]) ]
t = [ float(entries[1]) ]
p = [ float(entries[2]) ]
ne = [ float(entries[3]) ]
for i in range(nd-1):
line = f.readline()
entries = line.split()
dm.append( float(entries[0]))
t.append( float(entries[1]))
p.append( float(entries[2]))
ne.append( float(entries[3]))
atmos = np.zeros(nd, dtype={'names':('dm', 't', 'p','ne'),
'formats':('f', 'f', 'f','f')})
atmos['dm'] = dm
atmos['t'] = t
atmos['p'] = p
atmos['ne'] = ne
return (teff,logg,vmicro,abu,nd,atmos)
def read_marcs_model(modelfile):
"""Reads a MARCS model atmospheres
Parameters
----------
modelfile: str
file name. It can be a gzipped (.gz) file
Returns
-------
teff : float
effective temperature (K)
logg : float
log10 of the surface gravity (cm s-2)
vmicro : float
microturbulence velocity (km/s)
abu : list
abundances, number densities of nuclei relative to hydrogen N(X)/N(H)
for elements Z=1,99 (H to Es)
nd: int
number of depths (layers) of the model
atmos: numpy structured array
array with the run with depth of column mass, temperature, gas pressure
and electron density
"""
if modelfile[-3:] == '.gz':
f = gzip.open(modelfile,'rt')
else:
f = open(modelfile,'r')
line = f.readline()
line = f.readline()
entries = line.split()
assert (entries[1] == 'Teff'), 'Cannot find Teff in the file header'
teff = float(entries[0])
line = f.readline()
line = f.readline()
entries = line.split()
assert (entries[1] == 'Surface' and entries[2] == 'gravity'), 'Cannot find logg in the file header'
logg = np.log10(float(entries[0]))
line = f.readline()
entries = line.split()
assert (entries[1] == 'Microturbulence'), 'Cannot find vmicro in the file header'
vmicro = float(entries[0])
while entries[0] != 'Logarithmic':
line = f.readline()
entries = line.split()
abu = []
line = f.readline()
entries = line.split()
i = 0
while entries[1] != 'Number':
for word in entries:
abu.append( 10.**(float(word)-12.0) )
i = i + 1
line = f.readline()
entries = line.split()
if i < 99:
for j in range(99-i):
abu.append(1e-111)
i = i + 1
nd = int(entries[0])
line = f.readline()
entries = line.split()
assert (entries[0] == 'Model'), 'I cannot find the header of the atmospheric table in the input MARCS model'
line = f.readline()
line = f.readline()
entries = line.split()
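#electron number density follows from the electron pressure via the ideal gas law, Ne = Pe/(k*T)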
t = [ float(entries[4]) ]
p = [ float(entries[6]) ]
ne = [ float(entries[5]) / bolk / float(entries[4]) ]
for i in range(nd-1):
line = f.readline()
entries = line.split()
t.append( float(entries[4]))
p.append( float(entries[6]))
ne.append( float(entries[5]) / bolk / float(entries[4]))
line = f.readline()
line = f.readline()
entries = line.split()
dm = [ float(entries[-1]) ]
for i in range(nd-1):
line = f.readline()
entries = line.split()
dm.append( float(entries[7]))
atmos = np.zeros(nd, dtype={'names':('dm', 't', 'p','ne'),
'formats':('f', 'f', 'f','f')})
atmos['dm'] = dm
atmos['t'] = t
atmos['p'] = p
atmos['ne'] = ne
return (teff,logg,vmicro,abu,nd,atmos)
def read_marcs_model2(modelfile):
"""Reads a MARCS model atmospheres.
While read_marcs_model returns T, Pg and Ne in the structure 'atmos'
read_marcs_model2 returns T, rho, mmw, and Ne.
Parameters
----------
modelfile: str
file name. It can be a gzipped (.gz) file
Returns
-------
teff : float
effective temperature (K)
logg : float
log10 of the surface gravity (cm s-2)
vmicro : float
microturbulence velocity (km/s)
abu : list
abundances, number densities of nuclei relative to hydrogen N(X)/N(H)
for elements Z=1,99 (H to Es)
nd: int
number of depths (layers) of the model
atmos: numpy structured array
array with the run with depth of column mass, temperature, density,
mean molecular weight and electron number density
"""
if modelfile[-3:] == '.gz':
f = gzip.open(modelfile,'rt')
else:
f = open(modelfile,'r')
line = f.readline()
line = f.readline()
entries = line.split()
assert (entries[1] == 'Teff'), 'Cannot find Teff in the file header'
teff = float(entries[0])
line = f.readline()
line = f.readline()
entries = line.split()
assert (entries[1] == 'Surface' and entries[2] == 'gravity'), 'Cannot find logg in the file header'
logg = np.log10(float(entries[0]))
line = f.readline()
entries = line.split()
assert (entries[1] == 'Microturbulence'), 'Cannot find vmicro in the file header'
vmicro = float(entries[0])
while entries[0] != 'Logarithmic':
line = f.readline()
entries = line.split()
abu = []
line = f.readline()
entries = line.split()
i = 0
while entries[1] != 'Number':
for word in entries:
abu.append( 10.**(float(word)-12.0) )
i = i + 1
line = f.readline()
entries = line.split()
if i < 99:
for j in range(99-i):
abu.append(1e-111)
i = i + 1
nd = int(entries[0])
line = f.readline()
entries = line.split()
assert (entries[0] == 'Model'), 'I cannot find the header of the atmospheric table in the input MARCS model'
line = f.readline()
line = f.readline()
entries = line.split()
t = [ float(entries[4]) ]
p = [ float(entries[6]) ]
ne = [ float(entries[5]) / bolk / float(entries[4]) ]
for i in range(nd-1):
line = f.readline()
entries = line.split()
t.append( float(entries[4]))
p.append( float(entries[6]))
ne.append( float(entries[5]) / bolk / float(entries[4]))
line = f.readline()
line = f.readline()
entries = line.split()
rho = [ float(entries[3]) ]
dm = [ float(entries[7]) ]
mmw = [ float(entries[4]) ]
for i in range(nd-1):
line = f.readline()
entries = line.split()
rho.append( float(entries[3]))
dm.append( float(entries[7]))
mmw.append( float(entries[4]))
atmos = np.zeros(nd, dtype={'names':('dm', 't', 'rho','mmw','ne'),
'formats':('f', 'f', 'f','f','f')})
atmos['dm'] = dm
atmos['t'] = t
atmos['rho'] = rho
atmos['mmw'] = mmw
atmos['ne'] = ne
return (teff,logg,vmicro,abu,nd,atmos)
def read_phoenix_model(modelfile):
"""Reads a FITS Phoenix model atmospheres
Parameters
----------
modelfile: str
file name
Returns
-------
teff : float
effective temperature (K)
logg : float
log10 of the surface gravity (cm s-2)
vmicro : float
microturbulence velocity (km/s)
abu : list
abundances, number densities of nuclei relative to hydrogen N(X)/N(H)
for elements Z=1,99 (H to Es)
nd: int
number of depths (layers) of the model
atmos: numpy structured array
array with the run with depth of column mass, temperature, gas pressure
and electron density
"""
from astropy.io import fits
h = fits.open(modelfile)[0].header
f = fits.open(modelfile)[1].data
nd = len(f['temp'])
teff = float(h['PHXTEFF'])
logg = float(h['PHXLOGG'])
vmicro = float(h['PHXXI_L'])
m_h = float(h['PHXM_H'])
alpha = float(h['PHXALPHA'])
symbol, mass,sol = elements(husser=True)
abu = sol
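#z_metals and z_alphas below index the metals (Z>=3) and the alpha elements (O, Ne, Mg, Si, S, Ca, Ti)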
z_metals = np.arange(97,dtype=int) + 3
z_alphas = np.array([8,10,12,14,16,20,22],dtype=int)
#[M/H] and [alpha/Fe] are given in dex; apply them multiplicatively to the linear abundances
for i in range(len(z_metals)): abu[z_metals[i] - 1] = abu[z_metals[i] - 1] * 10.**m_h
for i in range(len(z_alphas)): abu[z_alphas[i] - 1] = abu[z_alphas[i] - 1] * 10.**alpha
atmos = np.zeros(nd, dtype={'names':('dm', 't', 'p','ne'),
'formats':('f', 'f', 'f','f')})
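#column mass from hydrostatic equilibrium: dm = Pgas/g (radiation and turbulent pressure neglected)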
atmos['dm'] = f['pgas'] / 10.**logg
atmos['t'] = f['temp']
atmos['p'] = f['pgas']
atmos['ne'] = f['pe']/ bolk / f['temp']
return (teff,logg,vmicro,abu,nd,atmos)
def read_phoenix_text_model(modelfile):
"""Reads a plain-text Phoenix model atmospheres
Parameters
----------
modelfile: str
file name
Returns
-------
teff : float
effective temperature (K)
logg : float
log10 of the surface gravity (cm s-2)
vmicro : float
microturbulence velocity (km/s)
abu : list
abundances, number densities of nuclei relative to hydrogen N(X)/N(H)
for elements Z=1,99 (H to Es)
nd: int
number of depths (layers) of the model
atmos: numpy structured array
array with the run with depth of column mass, temperature, gas pressure
and electron density
"""
f = open(modelfile,'r')
line = f.readline()
while line[0:4] != " no.":
line = f.readline()
entries = line.split()
nd = int(entries[5])
print('nd=',nd)
while line[0:14] != " model: teff":
line = f.readline()
entries = line.split()
teff = float(entries[3])
print('teff=',teff)
line = f.readline()
line = f.readline()
entries = line.split()
assert (entries[0] == 'log(g):' and entries[2] == '[cm/s**2]'), 'Cannot find logg in the file header'
logg = float(entries[1])
print('logg=',logg)
line = f.readline()
while line[0:22] != " Element abundances :":
line = f.readline()
symbol,mass,sol = elements()
sy = []
ab = []
while line[0:29] != " Element abundances relative":
line = f.readline()
#print(line)
if line[0:9] == ' element:':
entries = line.split()
for word in entries[1:]: sy.append(word)
if line[0:11] == ' abundance:':
entries = line.split()
for word in entries[1:]: ab.append(word)
assert (len(sy) == len(ab)), 'different elements in arrays sy (elemental symbols) and ab (abundances)'
abu = np.ones(99)*1e-99
i = 0
for item in sy:
try:
index = symbol.index(item)
abu[index] = 10.**(float(ab[i])-12.)
except ValueError:
print("the symbol ",item," is not recognized as a valid element")
i = i + 1
print('abu=',abu)
while line[0:72] != " l tstd temperature pgas pe density mu":
line = f.readline()
line = f.readline()
entries = line.split()
t = [ float(entries[2].replace('D','E')) ]
p = [ float(entries[3].replace('D','E')) ]
ne = [ float(entries[4].replace('D','E')) / bolk / float(entries[2].replace('D','E')) ]
dm = [ float(entries[3].replace('D','E')) / 10.**logg ] #assuming hydrostatic equil. and negligible radiation and turb. pressure
for i in range(nd-1):
line = f.readline()
entries = line.split()
t.append( float(entries[2].replace('D','E')))
p.append( float(entries[3].replace('D','E')))
ne.append( float(entries[4].replace('D','E')) / bolk / float(entries[2].replace('D','E')))
dm.append ( float(entries[3].replace('D','E')) / 10.**logg )
vmicro = 0.0
while (line[0:6] != " greli"):
line = f.readline()
if line == '':
print('Cannot find a value for vmicro (vturb) in the model atmosphere file ',modelfile)
break
if line != '':
entries = line.split()
vmicro = float(entries[5])
atmos = np.zeros(nd, dtype={'names':('dm', 't', 'p','ne'),
'formats':('f', 'f', 'f','f')})
atmos['dm'] = dm
atmos['t'] = t
atmos['p'] = p
atmos['ne'] = ne
return (teff,logg,vmicro,abu,nd,atmos)
def interp_spl(xout, x, y):
"""Interpolates in 1D using cubic splines
Parameters
----------
xout: numpy array or list
array of abscissae to interpolate to
x: numpy array or list
input abscissae
y: numpy array or list
input ordinates
Returns
-------
yout: numpy array or list
array of interpolated values
"""
tck = interpolate.splrep(x, y, s=0)
yout = interpolate.splev(xout, tck, der=0)
return(yout)
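#Illustrative use (assuming numpy imported as np):
#  xg = np.linspace(0., 10., 11)
#  yg = np.sin(xg)
#  yf = interp_spl(np.linspace(0., 10., 101), xg, yg)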
def elements(husser=False):
"""Reads the solar elemental abundances
Parameters
----------
husser: bool, optional
when set, the abundances adopted for the Phoenix models by Husser et al. (2013)
are used. Otherwise Asplund et al. (2005) are used -- consistent with
the MARCS (Gustafsson et al. 2008) and Kurucz (Meszaros et al. 2012)
model atmospheres.
Returns
-------
symbol: numpy array of str
element symbols
mass: numpy array of floats
atomic masses (elements Z=1-99)
sol: numpy array of floats
solar abundances N/N(H)
"""
symbol = [
'H' ,'He','Li','Be','B' ,'C' ,'N' ,'O' ,'F' ,'Ne',
'Na','Mg','Al','Si','P' ,'S' ,'Cl','Ar','K' ,'Ca',
'Sc','Ti','V' ,'Cr','Mn','Fe','Co','Ni','Cu','Zn',
'Ga','Ge','As','Se','Br','Kr','Rb','Sr','Y' ,'Zr',
'Nb','Mo','Tc','Ru','Rh','Pd','Ag','Cd','In','Sn',
'Sb','Te','I' ,'Xe','Cs','Ba','La','Ce','Pr','Nd',
'Pm','Sm','Eu','Gd','Tb','Dy','Ho','Er','Tm','Yb',
'Lu','Hf','Ta','W' ,'Re','Os','Ir','Pt','Au','Hg',
'Tl','Pb','Bi','Po','At','Rn','Fr','Ra','Ac','Th',
'Pa','U' ,'Np','Pu','Am','Cm','Bk','Cf','Es' ]
mass = [ 1.00794, 4.00260, 6.941, 9.01218, 10.811, 12.0107, 14.00674, 15.9994,
18.99840, 20.1797, 22.98977, 24.3050, 26.98154, 28.0855, 30.97376,
32.066, 35.4527, 39.948, 39.0983, 40.078, 44.95591, 47.867, 50.9415,
51.9961, 54.93805, 55.845, 58.93320, 58.6934, 63.546, 65.39, 69.723,
72.61, 74.92160, 78.96, 79.904, 83.80, 85.4678, 87.62, 88.90585,
91.224, 92.90638, 95.94, 98., 101.07, 102.90550, 106.42, 107.8682,
112.411, 114.818, 118.710, 121.760, 127.60, 126.90447, 131.29,
132.90545, 137.327, 138.9055, 140.116, 140.90765, 144.24, 145, 150.36,
151.964, 157.25, 158.92534, 162.50, 164.93032, 167.26, 168.93421,
173.04, 174.967, 178.49, 180.9479, 183.84, 186.207, 190.23, 192.217,
195.078, 196.96655, 200.59, 204.3833, 207.2, 208.98038, 209., 210.,
222., 223., 226., 227., 232.0381, 231.03588, 238.0289, 237., 244.,
243., 247., 247., 251., 252. ]
if not husser:
#Asplund, Grevesse and Sauval (2005), basically the same as
#Grevesse N., Asplund M., Sauval A.J. 2007, Space Science Review 130, 205
sol = [ 0.911, 10.93, 1.05, 1.38, 2.70, 8.39, 7.78, 8.66, 4.56, 7.84,
6.17, 7.53, 6.37, 7.51, 5.36, 7.14, 5.50, 6.18, 5.08, 6.31,
3.05, 4.90, 4.00, 5.64, 5.39, 7.45, 4.92, 6.23, 4.21, 4.60,
2.88, 3.58, 2.29, 3.33, 2.56, 3.28, 2.60, 2.92, 2.21, 2.59,
1.42, 1.92, -9.99, 1.84, 1.12, 1.69, 0.94, 1.77, 1.60, 2.00,
1.00, 2.19, 1.51, 2.27, 1.07, 2.17, 1.13, 1.58, 0.71, 1.45,
-9.99, 1.01, 0.52, 1.12, 0.28, 1.14, 0.51, 0.93, 0.00, 1.08,
0.06, 0.88, -0.17, 1.11, 0.23, 1.45, 1.38, 1.64, 1.01, 1.13,
0.90, 2.00, 0.65, -9.99, -9.99, -9.99, -9.99, -9.99, -9.99, 0.06,
-9.99, -0.52, -9.99, -9.99, -9.99, -9.99, -9.99, -9.99, -9.99 ]
sol[0] = 1.
else:
#a combination of meteoritic/photospheric abundances from Asplund et al. 2009
#chosen for the Husser et al. (2013) Phoenix model atmospheres
sol = [ 12.00, 10.93, 3.26, 1.38, 2.79, 8.43, 7.83, 8.69, 4.56, 7.93,
6.24, 7.60, 6.45, 7.51, 5.41, 7.12, 5.50, 6.40, 5.08, 6.34,
3.15, 4.95, 3.93, 5.64, 5.43, 7.50, 4.99, 6.22, 4.19, 4.56,
3.04, 3.65, 2.30, 3.34, 2.54, 3.25, 2.36, 2.87, 2.21, 2.58,
1.46, 1.88, -9.99, 1.75, 1.06, 1.65, 1.20, 1.71, 0.76, 2.04,
1.01, 2.18, 1.55, 2.24, 1.08, 2.18, 1.10, 1.58, 0.72, 1.42,
-9.99, 0.96, 0.52, 1.07, 0.30, 1.10, 0.48, 0.92, 0.10, 0.92,
0.10, 0.85, -0.12, 0.65, 0.26, 1.40, 1.38, 1.62, 0.80, 1.17,
0.77, 2.04, 0.65, -9.99, -9.99, -9.99, -9.99, -9.99, -9.99, 0.06,
-9.99, -0.54, -9.99, -9.99, -9.99, -9.99, -9.99, -9.99, -9.99 ]
sol[0] = 1.
for i in range(len(sol)-1): sol[i+1] = 10.**(sol[i+1]-12.0)
return (symbol,mass,sol)
def lgconv(xinput, yinput, fwhm, ppr=None):
"""convolution with a Gaussian in linear lambda scale
for a constant resolution
Parameters
----------
xinput: numpy float array
wavelengths
yinput: numpy array of floats
fluxes
fwhm: float
FWHM of the Gaussian (same units as for xinput)
ppr: float, optional
Points per resolution element to downsample the convolved spectrum
(default None, to keep the original sampling)
Returns
-------
x: numpy float array
wavelengths after convolution, will be a subset of xinput when that is linear,
otherwise a subset of the linearly resampled version
y: numpy array of floats
fluxes after convolution
"""
#resampling to a linear lambda wavelength scale if need be
xx = np.diff(xinput)
if max(xx) - min(xx) > 1.e-7: #input not linearly sampled
nel = len(xinput)
minx = np.min(xinput)
maxx = np.max(xinput)
x = np.linspace(minx,maxx,nel)
#y = np.interp( x, xinput, yinput)
y = interp_spl( x, xinput, yinput)
else: #input linearly sampled
x = xinput
y = yinput
step = x[1] - x[0]
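#FWHM to Gaussian sigma: FWHM = 2*sqrt(2*ln 2)*sigma (note -2*ln(0.5) = 2*ln 2)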
sigma=fwhm/2.0/np.sqrt(-2.0*np.log(0.5))
npoints = 2*int(3*fwhm/2./step)+1
half = npoints * step /2.
xx = np.linspace(-half,half,npoints)
kernel = np.exp(-(xx-np.mean(xx))**2/2./sigma**2)
kernel = kernel/np.sum(kernel)
y = np.convolve(y,kernel,'valid')
#y = ss.fftconvolve(y,kernel,'valid')
#print(npoints)
edge = int(npoints/2)
x = x[edge:-edge]
#print(xinput.size,x.size,y.size)
if ppr is not None:
  fac = int(fwhm / step / ppr)
  subset = np.arange(x.size // fac) * fac
  x = x[subset]
  y = y[subset]
return(x,y)
def vgconv(xinput,yinput,fwhm, ppr=None):
"""convolution with a Gaussian in log lambda scale
for a constant resolving power
Parameters
----------
xinput: numpy float array
wavelengths
yinput: numpy array of floats
fluxes
fwhm: float
FWHM of the Gaussian (km/s)
ppr: float, optional
Points per resolution element to downsample the convolved spectrum
(default None, to keep the original sampling)
Returns
-------
x: numpy float array
wavelengths after convolution, will be a subset of xinput when that is equidistant
in log lambda, otherwise a subset of the resampled version
y: numpy array of floats
fluxes after convolution
"""
#resampling to ln(lambda) if need be
xx = np.diff(np.log(xinput))
if max(xx) - min(xx) > 1.e-7: #input not equidist in loglambda
nel = len(xinput)
minx = np.log(xinput[0])
maxx = np.log(xinput[-1])
x = np.linspace(minx,maxx,nel)
step = x[1] - x[0]
x = np.exp(x)
#y = np.interp( x, xinput, yinput)
y = interp_spl( x, xinput, yinput)
else:
x = xinput
y = yinput
step = np.log(xinput[1])-np.log(xinput[0])
fwhm = fwhm/clight # inverse of the resolving power
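#a velocity FWHM corresponds to a constant width in ln(lambda), since dlambda/lambda = v/c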
sigma=fwhm/2.0/np.sqrt(-2.0*np.log(0.5))
npoints = 2*int(3*fwhm/2./step)+1
half = npoints * step /2.
xx = np.linspace(-half,half,npoints)
kernel = np.exp(-(xx-np.mean(xx))**2/2./sigma**2)
kernel = kernel/np.sum(kernel)
y = np.convolve(y,kernel,'valid')
edge = int(npoints/2)
x = x[edge:-edge]
#print(xinput.size,x.size,y.size)
if ppr is not None:
  fac = int(fwhm / step / ppr)
  #print(fwhm,step,ppr,fac)
  subset = np.arange(x.size // fac) * fac
  x = x[subset]
  y = y[subset]
return(x,y)
def rotconv(xinput,yinput,vsini, ppr=None):
"""convolution with a Rotation profile
Parameters
----------
xinput: numpy float array
wavelengths
yinput: numpy array of floats
fluxes
vsini: float
projected rotational velocity (km/s)
ppr: float, optional
Points per resolution element to downsample the convolved spectrum
(default None, to keep the original sampling)
Returns
-------
x: numpy float array
wavelengths after convolution, will be a subset of xinput when that is equidistant
in log lambda, otherwise a subset of the resampled version
y: numpy array of floats
fluxes after convolution
"""
#resampling to ln(lambda) if need be
xx = np.diff(np.log(xinput))
if max(xx) - min(xx) > 1.e-7: #input not equidist in loglambda
nel = len(xinput)
minx = np.min(np.log(xinput))
maxx = np.max(np.log(xinput))
x = np.linspace(minx,maxx,nel)
step = x[1] - x[0]
x = np.exp(x)
#y = np.interp( x, xinput, yinput)
y = interp_spl( x, xinput, yinput)
else:
  x = xinput
  y = yinput
  step = np.log(xinput[1]) - np.log(xinput[0])
deltamax=vsini/clight
npoints = 2*int(deltamax/step)+1
xx = np.linspace(-deltamax,deltamax,npoints)
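#rotational broadening kernel for a linear limb-darkening law with coefficient epsilon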
c1=2.0*(1.0-epsilon)/np.pi/(1.0-epsilon/3.0)/deltamax
c2=0.5*epsilon/(1.0-epsilon/3.0)/deltamax
r2=(xx/deltamax)**2
kernel = c1*np.sqrt(1.0-r2)+c2*(1.0-r2)
kernel = kernel/np.sum(kernel)
y = np.convolve(y,kernel,'valid')
#print(xinput.size,x.size,y.size)
edge = int(npoints/2)
x = x[edge:-edge]
if ppr is not None:
  fac = int(deltamax / step / ppr)
  subset = np.arange(x.size // fac) * fac
  x = x[subset]
  y = y[subset]
return(x,y)
def gsynth(synthfile,fwhm=0.0,outsynthfile=None,ppr=5,wrange=None,freeze=None):
"""Smooth the spectra in a FERRE grid by Gaussian convolution
Parameters
----------
synthfile: str
name of the input FERRE synth file
fwhm: float
FWHM of the Gaussian kernel (km/s)
(default is 0.0, which means no convolution is performed)
outsynthfile: str
name of the output FERRE synth file
(default is the input name with its first character replaced by 'n')
ppr: float, optional
Points per resolution element to downsample the convolved spectrum
(default is 5, set to None to keep the original sampling)
wrange: tuple
Starting and ending wavelengths (if a smaller range than
the input's is desired)
(default None, to keep the original range)
freeze: dictionary
Allows reducing the dimensionality of the grid. The keys are the labels
of the dimensions to freeze (as given in the header of the input grid)
with the values that should be adopted for those 'frozen' dimensions.
Example: set freeze = {'TEFF': 5000.} to fix that value for the Teff dimension
in a grid.
(default None, to retain all the original dimensions)
Returns
-------
writes outsynthfile with the smooth spectra
"""
if outsynthfile is None: outsynthfile='n'+synthfile[1:]
logw=0
#read header, update and write out
fin = open(synthfile,'r')
fout = open(outsynthfile,'w')
hd = []
labels = []
line = fin.readline()
hd.append(line)
while line[1] != "/":
line = fin.readline()
if "N_P" in line: n_p = np.array(line.split()[2:],dtype=int)
if "STEPS" in line: steps = np.array(line.split()[2:],dtype=float)
if "LLIMITS" in line: llimits = np.array(line.split()[2:],dtype=float)
if "LABEL" in line: labels.append(line.split()[-1][1:-1])
if "NPIX" in line: npix = int(line.split()[2])
if "N_OF_DIM" in line: ndim = int(line.split()[2])
if "WAVE" in line: wave = np.array(line.split()[2:],dtype=float)
if "LOGW" in line: logw = int(line.split()[2])
if "RESOLUTION" in line: resolution = float(line.split()[2])
hd.append(line)
assert (len(n_p) == len(steps) and len(n_p) == len(llimits) and len(n_p) == len(labels) and len(n_p) == ndim), 'The dimensions of the parameters from the header are inconsistent'
#update header parameters
x = np.arange(npix)*wave[1]+wave[0]
if logw == 1: x=10.**x
if logw == 2: x=np.exp(x)
#define indices for grid loops
ll = []
ind_n_p = []
i = 0
for entry in labels:
if freeze is not None:
lfkeys = list(freeze.keys())
if entry not in lfkeys: ind_n_p.append(i)
else:
ind_n_p.append(i)
ll.append(np.arange(n_p[i]))
i = i + 1
ind = list(product(*ll))
if wrange is not None:
assert (len(wrange) == 2), 'Error: wrange must have two elements'
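#pad the wavelength cut by ~10 kernel widths on each side to avoid convolution edge effects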
section1 = np.where( (x >= wrange[0]*(1.-10.*fwhm/clight)) & (x <= wrange[1]*(1.+10.*fwhm/clight)) )
x = x[section1]
npix = len(x)
if fwhm > 1.e-7:
y = np.ones(npix)
xx,yy = vgconv(x,y,fwhm,ppr=ppr)
else:
print('Warning -- fwhm <= 1.e-7, no convolution will be performed, ppr will be ignored')
xx = x
#print(len(x),len(xx))
if wrange is not None:
section2 = np.where( (xx >= wrange[0]) & (xx <= wrange[1]) )
xx = xx [section2]
#print(x,xx)
#print(len(x),len(xx))
jlabel = 0
for line in hd:
if "N_OF_DIM" in line: line = " N_OF_DIM = "+str(len(ind_n_p))+"\n"
if "N_P" in line: line = " N_P = "+' '.join(map(str,n_p[ind_n_p]))+"\n"
if "STEPS" in line: line = " STEPS = "+' '.join(map(str,steps[ind_n_p]))+"\n"
if "LLIMITS" in line: line = " LLIMITS = "+' '.join(map(str,llimits[ind_n_p]))+"\n"
if freeze is not None:
if "LABEL" in line:
ilabel = line.split()[-1][1:-1] #drop starting/ending quotes
if ilabel in lfkeys:
continue
else:
jlabel = jlabel + 1
line = " LABEL("+str(jlabel)+") = "+ilabel+"\n"
if "NPIX" in line: line = " NPIX = "+str(len(xx))+"\n"
if "WAVE" in line: line = " WAVE = "+str(np.log10(xx[0]))+" "+str(np.log10(xx[1])-np.log10(xx[0]))+"\n"
if "LOGW" in line: line = " LOGW = 1 \n"
if "RESOLUTION" in line: line = " RESOLUTION = "+str(clight/np.sqrt(clight**2/resolution**2 + fwhm**2))+"\n"
fout.write(line)
#smooth and write data
k = 0
j = 0
ntot = np.prod(n_p)
for i in ind:
j = j + 1
print('line ',j,' of ',ntot)
#print(k,ntot,i)
#print(i,steps,llimits)
par = i*steps+llimits
line = fin.readline()
if freeze is not None:
skip = True
for entry in lfkeys:
if (abs(freeze[entry] - par[labels.index(entry)]) < 1e-6): skip = False
if skip: continue
y = np.array(line.split(),dtype=float)
if wrange is not None: y = y [section1]
if fwhm > 1.e-7:
xx,yy = vgconv(x,y,fwhm,ppr=ppr)
else:
xx,yy = x, y
if wrange is not None: yy = yy[section2]
yy.tofile(fout,sep=" ",format="%0.4e")
fout.write("\n")
k = k + 1
fin.close()
fout.close()
if __name__ == "__main__":
npar = len(sys.argv)
assert (npar >= 4), 'Synple requires at least 3 input parameters (modelfile wstart wend)'
assert (npar <= 7), 'Synple accepts at most 6 input parameters (modelfile wstart wend vmicro fwhm vrot)'
vmicro = None
vrot = 0.0
fwhm = 0.0
modelfile = sys.argv[1]
wstart = float(sys.argv[2])
wend = float(sys.argv[3])
if (npar > 4):
vmicro = float(sys.argv[4])
if (npar > 5):
fwhm = float(sys.argv[5])
if (npar > 6):
vrot = float(sys.argv[6])
#symbol, mass, sol = elements()
x, y, z = syn(modelfile, (wstart,wend), save=True, vmicro=vmicro, vrot=vrot, fwhm=fwhm)
| """Reads a model atmosphere into a structure
Parameters
----------
modelfile : str
file with a model atmosphere
Returns
-------
atmostype : str
type of model atmosphere (kurucz/marcs/phoenix)
teff : float
effective temperature (K)
logg : float
log10 of the surface gravity (cm s-2)
vmicro : float
microturbulence velocity (km/s)
abu : list
abundances, number densities of nuclei relative to hydrogen N(X)/N(H)
for elements Z=1,99 (H to Es)
nd: int
number of depths (layers) of the model
atmos: numpy structured array
array with the run with depth of column mass, temperature, gas pressure
and electron density
"""
#check
if not os.path.isfile(modelfile):
mf = os.path.join(modeldir,modelfile)
if os.path.isfile(mf): modelfile = mf
atmostype = identify_atmostype(modelfile)
if atmostype == 'kurucz':
teff, logg, vmicro, abu, nd, atmos = read_kurucz_model(modelfile)
if atmostype == 'marcs':
teff, logg, vmicro, abu, nd, atmos = read_marcs_model2(modelfile)
if atmostype == 'phoenix':
teff, logg, vmicro, abu, nd, atmos = read_phoenix_model(modelfile)
return (atmostype,teff,logg,vmicro,abu,nd,atmos) |
IconAdb.tsx | import classNames from 'classnames';
import { ComponentClassNames } from '../../shared';
import { View } from '../../View';
export const IconAdb = (props) => {
const { className, ...rest } = props;
return (
<View
as="span"
width="1em"
height="1em" | className={classNames(ComponentClassNames.Icon, className)}
{...rest}
>
<svg
width="24"
height="24"
viewBox="0 0 24 24"
fill="none"
xmlns="http://www.w3.org/2000/svg"
>
<path
d="M5 15.9999C5 19.8699 8.13 22.9999 12 22.9999C15.87 22.9999 19 19.8699 19 15.9999V11.9999H5V15.9999ZM16.12 4.36994L18.22 2.26994L17.4 1.43994L15.1 3.74994C14.16 3.27994 13.12 2.99994 12 2.99994C10.88 2.99994 9.84 3.27994 8.91 3.74994L6.6 1.43994L5.78 2.26994L7.88 4.36994C6.14 5.63994 5 7.67994 5 9.99994V10.9999H19V9.99994C19 7.67994 17.86 5.63994 16.12 4.36994ZM9 8.99994C8.45 8.99994 8 8.54994 8 7.99994C8 7.44994 8.45 6.99994 9 6.99994C9.55 6.99994 10 7.44994 10 7.99994C10 8.54994 9.55 8.99994 9 8.99994ZM15 8.99994C14.45 8.99994 14 8.54994 14 7.99994C14 7.44994 14.45 6.99994 15 6.99994C15.55 6.99994 16 7.44994 16 7.99994C16 8.54994 15.55 8.99994 15 8.99994Z"
fill="currentColor"
/>
</svg>
</View>
);
}; | |
shell_compatibility_version.rs | // Copyright (c) SimpleStaking, Viable Systems and Tezedge Contributors
// SPDX-License-Identifier: MIT
use serde::{Deserialize, Serialize};
use tezos_messages::p2p::encoding::ack::NackMotive;
use tezos_messages::p2p::encoding::prelude::NetworkVersion;
/// Holds information about supported versions:
/// - all distributed_db_versions
/// - all p2p_versions
/// - version -> version used for bootstrap
#[derive(Serialize, Deserialize, Eq, PartialEq, Clone, Debug)]
pub struct ShellCompatibilityVersion {
/// All supported distributed_db_versions
distributed_db_versions: Vec<u16>,
/// All supported p2p_versions
p2p_versions: Vec<u16>,
/// version of network protocol, which we send to other peers
version: NetworkVersion,
}
impl ShellCompatibilityVersion {
const DEFAULT_VERSION: u16 = 0u16;
pub fn new(
chain_name: String,
distributed_db_versions: Vec<u16>,
p2p_versions: Vec<u16>,
) -> Self {
Self {
version: NetworkVersion::new(
chain_name,
*distributed_db_versions
.iter()
.max()
.unwrap_or(&Self::DEFAULT_VERSION),
*p2p_versions.iter().max().unwrap_or(&Self::DEFAULT_VERSION),
),
distributed_db_versions,
p2p_versions,
}
}
/// Returns Ok(version) if the requested version is compatible; the result is the
/// calculated compatible version for later use (NetworkVersion can contain feature support).
/// Returns Err(NackMotive) if something is wrong.
pub fn choose_compatible_version(
&self,
requested: &NetworkVersion,
) -> Result<NetworkVersion, NackMotive> {
if !self.version.chain_name().eq(requested.chain_name()) {
return Err(NackMotive::UnknownChainName);
} | Self::select_compatible_version(
&self.distributed_db_versions,
requested.distributed_db_version(),
NackMotive::DeprecatedDistributedDbVersion,
)?,
Self::select_compatible_version(
&self.p2p_versions,
requested.p2p_version(),
NackMotive::DeprecatedP2pVersion,
)?,
))
}
pub fn to_network_version(&self) -> NetworkVersion {
self.version.clone()
}
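/// Picks the best compatible version: the highest supported version when the peer
/// requests it or something newer, otherwise the exact requested version when it is
/// among the supported ones; anything else yields the given nack motive.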
fn select_compatible_version(
supported_versions: &[u16],
requested_version: &u16,
nack_motive: NackMotive,
) -> Result<u16, NackMotive> {
let best_supported_version = supported_versions
.iter()
.max()
.unwrap_or(&Self::DEFAULT_VERSION);
if best_supported_version <= requested_version {
return Ok(*best_supported_version);
}
if supported_versions.contains(requested_version) {
return Ok(*requested_version);
}
Err(nack_motive)
}
}
#[cfg(test)]
mod tests {
use tezos_messages::p2p::encoding::ack::NackMotive;
use tezos_messages::p2p::encoding::version::NetworkVersion;
use super::*;
#[test]
fn test_shell_version() {
let tested =
ShellCompatibilityVersion::new("TEST_CHAIN".to_string(), vec![3, 4], vec![1, 2]);
assert!(matches!(
tested.choose_compatible_version(&NetworkVersion::new("TEST_XYZ".to_string(), 0, 0)),
Err(NackMotive::UnknownChainName)
));
assert!(matches!(
tested.choose_compatible_version(&NetworkVersion::new("TEST_CHAIN".to_string(), 0, 0)),
Err(NackMotive::DeprecatedDistributedDbVersion)
));
assert!(matches!(
tested.choose_compatible_version(&NetworkVersion::new("TEST_CHAIN".to_string(), 1, 0)),
Err(NackMotive::DeprecatedDistributedDbVersion)
));
assert!(matches!(
tested.choose_compatible_version(&NetworkVersion::new("TEST_CHAIN".to_string(), 2, 0)),
Err(NackMotive::DeprecatedDistributedDbVersion)
));
assert!(matches!(
tested.choose_compatible_version(&NetworkVersion::new("TEST_CHAIN".to_string(), 3, 0)),
Err(NackMotive::DeprecatedP2pVersion)
));
assert!(matches!(
tested.choose_compatible_version(&NetworkVersion::new("TEST_CHAIN".to_string(), 4, 0)),
Err(NackMotive::DeprecatedP2pVersion)
));
assert!(matches!(
tested.choose_compatible_version(&NetworkVersion::new("TEST_CHAIN".to_string(), 5, 0)),
Err(NackMotive::DeprecatedP2pVersion)
));
assert_eq!(
tested.choose_compatible_version(&NetworkVersion::new("TEST_CHAIN".to_string(), 3, 1)),
Ok(NetworkVersion::new("TEST_CHAIN".to_string(), 3, 1))
);
assert_eq!(
tested.choose_compatible_version(&NetworkVersion::new("TEST_CHAIN".to_string(), 3, 2)),
Ok(NetworkVersion::new("TEST_CHAIN".to_string(), 3, 2))
);
assert_eq!(
tested.choose_compatible_version(&NetworkVersion::new("TEST_CHAIN".to_string(), 3, 3)),
Ok(NetworkVersion::new("TEST_CHAIN".to_string(), 3, 2))
);
assert_eq!(
tested.choose_compatible_version(&NetworkVersion::new("TEST_CHAIN".to_string(), 4, 1)),
Ok(NetworkVersion::new("TEST_CHAIN".to_string(), 4, 1))
);
assert_eq!(
tested.choose_compatible_version(&NetworkVersion::new("TEST_CHAIN".to_string(), 4, 2)),
Ok(NetworkVersion::new("TEST_CHAIN".to_string(), 4, 2))
);
assert_eq!(
tested.choose_compatible_version(&NetworkVersion::new("TEST_CHAIN".to_string(), 4, 3)),
Ok(NetworkVersion::new("TEST_CHAIN".to_string(), 4, 2))
);
assert_eq!(
tested.choose_compatible_version(&NetworkVersion::new("TEST_CHAIN".to_string(), 5, 1)),
Ok(NetworkVersion::new("TEST_CHAIN".to_string(), 4, 1))
);
assert_eq!(
tested.choose_compatible_version(&NetworkVersion::new("TEST_CHAIN".to_string(), 5, 2)),
Ok(NetworkVersion::new("TEST_CHAIN".to_string(), 4, 2))
);
assert_eq!(
tested.choose_compatible_version(&NetworkVersion::new("TEST_CHAIN".to_string(), 5, 3)),
Ok(NetworkVersion::new("TEST_CHAIN".to_string(), 4, 2))
);
assert_eq!(
tested.to_network_version(),
NetworkVersion::new("TEST_CHAIN".to_string(), 4, 2)
);
}
} |
Ok(NetworkVersion::new(
self.version.chain_name().clone(), |
database.py | from datetime import datetime
import json
from pathlib import Path
import pymssql
config_json: dict = json.loads(Path('config.json').read_text())
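# config.json is expected to hold the connection settings used below, e.g.
# {"server": "...", "user": "...", "password": "...", "database": "..."}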
# Connecting to database
def connect():
# Connect to Microsoft SQL server
conn = pymssql.connect(
server=config_json['server'],
user=config_json['user'],
password=config_json['password'],
database=config_json['database']
)
return conn
def student_exists(sid: int) -> bool:
conn = connect()
cursor = conn.cursor()
# Check whether a student with this SID exists (parameterized query)
cursor.execute('select COUNT(1) from StudentInfo where SID=%s;', (sid,))
result = cursor.fetchone()
conn.close()
return result[0] == 1
def student_info_get() -> dict:
conn = connect()
cursor = conn.cursor()
# Get all students from database
cursor.execute('select * from StudentInfo order by SID;')
# Convert the result table into a JSON-serializable dict
data: dict = {'elements': []}
row = cursor.fetchone()
while row:
    # Append one student record
    data['elements'].append({
        'SID': row[0],
        'fName': row[1],
        'lName': row[2]
    })
    row = cursor.fetchone()
conn.close()
return data
def student_info_insert(info: dict) -> None:
# Add the entry to the database using a parameterized query
conn = connect()
cursor = conn.cursor()
cursor.execute(
    'insert into StudentInfo values (%s, %s, %s);',
    (info['SID'], info['fName'], info['lName'])
)
conn.commit()
conn.close()
print('Finished sql req!')
def | (sid: int) -> None:
conn = connect()
cursor = conn.cursor()
cursor.execute('delete from StudentInfo where SID=%s;', (sid,))
conn.commit()
conn.close()
def presence_get() -> dict:
conn = connect()
cursor = conn.cursor()
cursor.execute('select * from Presence order by Date;')
# Convert the result table into a JSON-serializable dict
data = {'elements': []}
row = cursor.fetchone()
while row:
    # Append one presence record
    data['elements'].append({
        'SID': row[0],
        'Date': row[1].strftime('%Y-%m-%d %H:%M:%S'),
        'CID': str(row[2]),
        'Room': row[3]
    })
    row = cursor.fetchone()
conn.close()
return data
def presence_insert(info: dict):
# Get values from the JSON payload
sid = info.get('SID')
date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
cid = info.get('CID')
room = info.get('Room')
# Build the insert dynamically (CID and Room are optional),
# passing the values as query parameters
sql_req = 'insert into Presence values (%s, %s'
values = [sid, date]
if cid is not None:
    sql_req += ', %s'
    values.append(cid)
if room:
    sql_req += ', %s'
    values.append(room)
sql_req += ');'
conn = connect()
cursor = conn.cursor()
cursor.execute(sql_req, tuple(values))
conn.commit()
conn.close()
print('Finished sql req!')
| student_info_delete |
contact.js | // @flow | export default () => (
<Page title="Contact us">
<div>Contact</div>
</Page>
) | import React from 'react'
import Page from '../components/Page'
|
test_auth.py | #!/usr/bin/env python
"""Tests admin-related functionality"""
import os
import pytest
import python_pachyderm
from python_pachyderm.experimental.service import auth_proto, identity_proto
from tests import util
# bp_to_pb: OidcConfig -> OIDCConfig
@pytest.fixture
def client():
pc = python_pachyderm.experimental.Client()
pc.activate_license(os.environ["PACH_PYTHON_ENTERPRISE_CODE"])
pc.add_cluster("localhost", "localhost:1650", secret="secret")
pc.activate_enterprise("localhost:1650", "localhost", "secret")
pc.auth_token = "iamroot"
pc.activate_auth(pc.auth_token)
pc.set_identity_server_config(
config=identity_proto.IdentityServerConfig(issuer="http://localhost:1658")
)
yield pc
# not redundant because auth_token could be overridden by tests
pc.auth_token = "iamroot"
try:
pc.delete_all_identity()
except Exception:
pass
try:
pc.delete_all_license()
except Exception:
pass
try:
pc.deactivate_auth()
except Exception:
pass
pc.deactivate_enterprise()
@util.skip_if_no_enterprise()
def test_auth_configuration(client):
client.get_auth_configuration()
client.set_auth_configuration(
auth_proto.OidcConfig(
issuer="http://localhost:1658",
client_id="client",
client_secret="secret",
redirect_uri="http://test.example.com",
)
)
@util.skip_if_no_enterprise()
def | (client):
cluster_resource = auth_proto.Resource(type=auth_proto.ResourceType.CLUSTER)
binding = client.get_role_binding(cluster_resource)
assert binding["pach:root"].roles["clusterAdmin"]
client.modify_role_binding(
cluster_resource, "robot:someuser", roles=["clusterAdmin"]
)
binding = client.get_role_binding(cluster_resource)
assert binding["robot:someuser"].roles["clusterAdmin"]
@util.skip_if_no_enterprise()
def test_authorize(client):
client.authorize(
auth_proto.Resource(type=auth_proto.ResourceType.REPO, name="foobar"),
[auth_proto.Permission.REPO_READ],
)
@util.skip_if_no_enterprise()
def test_who_am_i(client):
assert client.who_am_i().username == "pach:root"
@util.skip_if_no_enterprise()
def test_get_roles_for_permission(client):
# Checks built-in roles
roles = client.get_roles_for_permission(auth_proto.Permission.REPO_READ)
for r in roles:
assert auth_proto.Permission.REPO_READ in r.permissions
roles = client.get_roles_for_permission(
auth_proto.Permission.CLUSTER_GET_PACHD_LOGS
)
for r in roles:
assert auth_proto.Permission.CLUSTER_GET_PACHD_LOGS in r.permissions
@util.skip_if_no_enterprise()
def test_robot_token(client):
auth_token = client.get_robot_token("robot:root", ttl=30)
client.auth_token = auth_token
assert client.who_am_i().username == "robot:root"
client.revoke_auth_token(auth_token)
with pytest.raises(python_pachyderm.RpcError):
client.who_am_i()
@util.skip_if_no_enterprise()
def test_groups(client):
assert client.get_groups() == []
client.set_groups_for_user("pach:root", ["foogroup"])
assert client.get_groups() == ["foogroup"]
assert client.get_users("foogroup") == ["pach:root"]
client.modify_members("foogroup", remove=["pach:root"])
assert client.get_groups() == []
assert client.get_users("foogroup") == []
| test_cluster_role_bindings |
_load_balancer_backend_address_pools_operations.py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancerBackendAddressPoolsOperations(object):
"""LoadBalancerBackendAddressPoolsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2017_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def | (self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.LoadBalancerBackendAddressPoolListResult"]
"""Gets all the load balancer backed address pools.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LoadBalancerBackendAddressPoolListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2017_11_01.models.LoadBalancerBackendAddressPoolListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.LoadBalancerBackendAddressPoolListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-11-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('LoadBalancerBackendAddressPoolListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools'} # type: ignore
def get(
self,
resource_group_name, # type: str
load_balancer_name, # type: str
backend_address_pool_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.BackendAddressPool"
"""Gets load balancer backend address pool.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param backend_address_pool_name: The name of the backend address pool.
:type backend_address_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BackendAddressPool, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2017_11_01.models.BackendAddressPool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.BackendAddressPool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-11-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'backendAddressPoolName': self._serialize.url("backend_address_pool_name", backend_address_pool_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('BackendAddressPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools/{backendAddressPoolName}'} # type: ignore
| __init__ |
datera_iscsi.py | # Copyright 2017 Datera
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import uuid
from eventlet.green import threading
from oslo_config import cfg
from oslo_log import log as logging
import six
from cinder import exception
from cinder.i18n import _
from cinder import utils
from cinder.volume import configuration
from cinder.volume.drivers.san import san
import cinder.volume.drivers.datera.datera_api2 as api2
import cinder.volume.drivers.datera.datera_api21 as api21
import cinder.volume.drivers.datera.datera_common as datc
LOG = logging.getLogger(__name__)
d_opts = [
cfg.StrOpt('datera_api_port',
default='7717',
help='Datera API port.'),
cfg.StrOpt('datera_api_version',
default='2',
deprecated_for_removal=True,
help='Datera API version.'),
cfg.IntOpt('datera_503_timeout',
default='120',
help='Timeout for HTTP 503 retry messages'),
cfg.IntOpt('datera_503_interval',
default='5',
help='Interval between 503 retries'),
cfg.BoolOpt('datera_debug',
default=False,
help="True to set function arg and return logging"),
cfg.BoolOpt('datera_debug_replica_count_override',
default=False,
help="ONLY FOR DEBUG/TESTING PURPOSES\n"
"True to set replica_count to 1"),
cfg.StrOpt('datera_tenant_id',
default=None,
help="If set to 'Map' --> OpenStack project ID will be mapped "
"implicitly to Datera tenant ID\n"
"If set to 'None' --> Datera tenant ID will not be used "
"during volume provisioning\n"
"If set to anything else --> Datera tenant ID will be the "
"provided value"),
cfg.BoolOpt('datera_disable_profiler',
default=False,
help="Set to True to disable profiling in the Datera driver"),
]
CONF = cfg.CONF
CONF.import_opt('driver_use_ssl', 'cinder.volume.driver')
CONF.register_opts(d_opts, group=configuration.SHARED_CONF_GROUP)
@six.add_metaclass(utils.TraceWrapperWithABCMetaclass)
class DateraDriver(san.SanISCSIDriver, api2.DateraApi, api21.DateraApi):
"""The OpenStack Datera Driver
Version history:
* 1.0 - Initial driver
* 1.1 - Look for lun-0 instead of lun-1.
* 2.0 - Update For Datera API v2
* 2.1 - Multipath, ACL and reorg
* 2.2 - Capabilities List, Extended Volume-Type Support
Naming convention change,
Volume Manage/Unmanage support
* 2.3 - Templates, Tenants, Snapshot Polling,
2.1 Api Version Support, Restructure
* 2.3.1 - Scalability bugfixes
* 2.3.2 - Volume Placement, ACL multi-attach bugfix
* 2.4.0 - Fast Retype Support
"""
VERSION = '2.4.0'
CI_WIKI_NAME = "datera-ci"
HEADER_DATA = {'Datera-Driver': 'OpenStack-Cinder-{}'.format(VERSION)}
# TODO(jsbryant) Remove driver in the 'U' release if CI is not fixed.
SUPPORTED = False
def __init__(self, *args, **kwargs):
super(DateraDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(d_opts)
self.username = self.configuration.san_login
self.password = self.configuration.san_password
self.cluster_stats = {}
self.datera_api_token = None
self.interval = self.configuration.datera_503_interval
self.retry_attempts = (self.configuration.datera_503_timeout /
self.interval)
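# number of 503 retries that fit in the configured timeout window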
self.driver_prefix = str(uuid.uuid4())[:4]
self.datera_debug = self.configuration.datera_debug
self.datera_api_versions = []
if self.datera_debug:
utils.setup_tracing(['method'])
self.tenant_id = self.configuration.datera_tenant_id
if self.tenant_id and self.tenant_id.lower() == 'none':
self.tenant_id = None
self.api_check = time.time()
self.api_cache = []
self.api_timeout = 0
self.do_profile = not self.configuration.datera_disable_profiler
self.thread_local = threading.local()
backend_name = self.configuration.safe_get(
'volume_backend_name')
self.backend_name = backend_name or 'Datera'
datc.register_driver(self)
def do_setup(self, context):
# If we can't authenticate through the old and new method, just fail
# now.
if not all([self.username, self.password]):
msg = _("san_login and/or san_password is not set for Datera "
"driver in the cinder.conf. Set this information and "
"start the cinder-volume service again.")
LOG.error(msg)
raise exception.InvalidInput(msg)
self.login()
self._create_tenant()
# =================
# =================
# = Create Volume =
# =================
@datc._api_lookup
def | (self, volume):
"""Create a logical volume."""
pass
# =================
# = Extend Volume =
# =================
@datc._api_lookup
def extend_volume(self, volume, new_size):
pass
# =================
# =================
# = Cloned Volume =
# =================
@datc._api_lookup
def create_cloned_volume(self, volume, src_vref):
pass
# =================
# = Delete Volume =
# =================
@datc._api_lookup
def delete_volume(self, volume):
pass
# =================
# = Ensure Export =
# =================
@datc._api_lookup
def ensure_export(self, context, volume, connector=None):
"""Gets the associated account, retrieves CHAP info and updates."""
# =========================
# = Initialize Connection =
# =========================
@datc._api_lookup
def initialize_connection(self, volume, connector):
pass
# =================
# = Create Export =
# =================
@datc._api_lookup
def create_export(self, context, volume, connector):
pass
# =================
# = Detach Volume =
# =================
@datc._api_lookup
def detach_volume(self, context, volume, attachment=None):
pass
# ===================
# = Create Snapshot =
# ===================
@datc._api_lookup
def create_snapshot(self, snapshot):
pass
# ===================
# = Delete Snapshot =
# ===================
@datc._api_lookup
def delete_snapshot(self, snapshot):
pass
# ========================
# = Volume From Snapshot =
# ========================
@datc._api_lookup
def create_volume_from_snapshot(self, volume, snapshot):
pass
# ==========
# = Retype =
# ==========
@datc._api_lookup
def retype(self, ctxt, volume, new_type, diff, host):
"""Convert the volume to be of the new type.
Returns a boolean indicating whether the retype occurred.
:param ctxt: Context
:param volume: A dictionary describing the volume to migrate
:param new_type: A dictionary describing the volume type to convert to
:param diff: A dictionary with the difference between the two types
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities (Not Used).
"""
pass
# ==========
# = Manage =
# ==========
@datc._api_lookup
def manage_existing(self, volume, existing_ref):
"""Manage an existing volume on the Datera backend
The existing_ref must be either the current name or Datera UUID of
an app_instance on the Datera backend in a colon separated list with
the storage instance name and volume name. This means only
single storage instances and single volumes are supported for
managing by cinder.
Eg.
(existing_ref['source-name'] ==
tenant:app_inst_name:storage_inst_name:vol_name)
if using Datera 2.1 API
or
(existing_ref['source-name'] ==
app_inst_name:storage_inst_name:vol_name)
if using 2.0 API
:param volume: Cinder volume to manage
:param existing_ref: Driver-specific information used to identify a
volume
"""
pass
# ===================
# = Manage Get Size =
# ===================
@datc._api_lookup
def manage_existing_get_size(self, volume, existing_ref):
"""Get the size of an unmanaged volume on the Datera backend
The existing_ref must be either the current name or Datera UUID of
an app_instance on the Datera backend in a colon separated list with
the storage instance name and volume name. This means only
single storage instances and single volumes are supported for
managing by cinder.
Eg.
existing_ref == app_inst_name:storage_inst_name:vol_name
:param volume: Cinder volume to manage
:param existing_ref: Driver-specific information used to identify a
volume on the Datera backend
"""
pass
# =========================
# = Get Manageable Volume =
# =========================
@datc._api_lookup
def get_manageable_volumes(self, cinder_volumes, marker, limit, offset,
sort_keys, sort_dirs):
"""List volumes on the backend available for management by Cinder.
Returns a list of dictionaries, each specifying a volume in the host,
with the following keys:
- reference (dictionary): The reference for a volume, which can be
passed to 'manage_existing'.
- size (int): The size of the volume according to the storage
backend, rounded up to the nearest GB.
- safe_to_manage (boolean): Whether or not this volume is safe to
manage according to the storage backend. For example, is the volume
in use or invalid for any reason.
- reason_not_safe (string): If safe_to_manage is False, the reason why.
- cinder_id (string): If already managed, provide the Cinder ID.
- extra_info (string): Any extra information to return to the user
:param cinder_volumes: A list of volumes in this host that Cinder
currently manages, used to determine if
a volume is manageable or not.
:param marker: The last item of the previous page; we return the
next results after this value (after sorting)
:param limit: Maximum number of items to return
:param offset: Number of items to skip after marker
:param sort_keys: List of keys to sort results by (valid keys are
'identifier' and 'size')
:param sort_dirs: List of directions to sort by, corresponding to
sort_keys (valid directions are 'asc' and 'desc')
"""
pass
# ============
# = Unmanage =
# ============
@datc._api_lookup
def unmanage(self, volume):
"""Unmanage a currently managed volume in Cinder
:param volume: Cinder volume to unmanage
"""
pass
# ================
# = Volume Stats =
# ================
@datc._api_lookup
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If 'refresh' is True, run update first.
The name is a bit misleading as
the majority of the data here is cluster
data.
"""
pass
# =========
# = Login =
# =========
@datc._api_lookup
def login(self):
pass
# =======
# = QoS =
# =======
def _update_qos(self, resource, policies):
url = datc.URL_TEMPLATES['vol_inst'](
policies['default_storage_name'],
policies['default_volume_name']) + '/performance_policy'
url = url.format(datc._get_name(resource['id']))
type_id = resource.get('volume_type_id', None)
if type_id is not None:
# Filter for just QOS policies in result. All of their keys
# should end with "max"
fpolicies = {k: int(v) for k, v in
policies.items() if k.endswith("max")}
# Filter all 0 values from being passed
fpolicies = dict(filter(lambda _v: _v[1] > 0, fpolicies.items()))
if fpolicies:
self._issue_api_request(url, 'post', body=fpolicies,
api_version='2')
def _get_lunid(self):
return 0
# ============================
# = Volume-Types/Extra-Specs =
# ============================
def _init_vendor_properties(self):
"""Create a dictionary of vendor unique properties.
This method creates a dictionary of vendor unique properties
and returns both created dictionary and vendor name.
Returned vendor name is used to check for name of vendor
unique properties.
- Vendor name shouldn't include colon(:) because of the separator
and it is automatically replaced by underscore(_).
ex. abc:d -> abc_d
- Vendor prefix is equal to vendor name.
ex. abcd
- Vendor unique properties must start with vendor prefix + ':'.
ex. abcd:maxIOPS
Each backend driver needs to override this method to expose
its own properties using _set_property() like this:
self._set_property(
properties,
"vendorPrefix:specific_property",
"Title of property",
_("Description of property"),
"type")
: return dictionary of vendor unique properties
: return vendor name
prefix: DF --> Datera Fabric
"""
properties = {}
self._set_property(
properties,
"DF:placement_mode",
"Datera Volume Placement",
_("'single_flash' for single-flash-replica placement, "
"'all_flash' for all-flash-replica placement, "
"'hybrid' for hybrid placement"),
"string",
default="hybrid")
self._set_property(
properties,
"DF:round_robin",
"Datera Round Robin Portals",
_("True to round robin the provided portals for a target"),
"boolean",
default=False)
if self.configuration.get('datera_debug_replica_count_override'):
replica_count = 1
else:
replica_count = 3
self._set_property(
properties,
"DF:replica_count",
"Datera Volume Replica Count",
_("Specifies number of replicas for each volume. Can only be "
"increased once volume is created"),
"integer",
minimum=1,
default=replica_count)
self._set_property(
properties,
"DF:acl_allow_all",
"Datera ACL Allow All",
_("True to set acl 'allow_all' on volumes created. Cannot be "
"changed on volume once set"),
"boolean",
default=False)
self._set_property(
properties,
"DF:ip_pool",
"Datera IP Pool",
_("Specifies IP pool to use for volume"),
"string",
default="default")
self._set_property(
properties,
"DF:template",
"Datera Template",
_("Specifies Template to use for volume provisioning"),
"string",
default="")
# ###### QoS Settings ###### #
self._set_property(
properties,
"DF:read_bandwidth_max",
"Datera QoS Max Bandwidth Read",
_("Max read bandwidth setting for volume qos, "
"use 0 for unlimited"),
"integer",
minimum=0,
default=0)
self._set_property(
properties,
"DF:default_storage_name",
"Datera Default Storage Instance Name",
_("The name to use for storage instances created"),
"string",
default="storage-1")
self._set_property(
properties,
"DF:default_volume_name",
"Datera Default Volume Name",
_("The name to use for volumes created"),
"string",
default="volume-1")
self._set_property(
properties,
"DF:write_bandwidth_max",
"Datera QoS Max Bandwidth Write",
_("Max write bandwidth setting for volume qos, "
"use 0 for unlimited"),
"integer",
minimum=0,
default=0)
self._set_property(
properties,
"DF:total_bandwidth_max",
"Datera QoS Max Bandwidth Total",
_("Max total bandwidth setting for volume qos, "
"use 0 for unlimited"),
"integer",
minimum=0,
default=0)
self._set_property(
properties,
"DF:read_iops_max",
"Datera QoS Max iops Read",
_("Max read iops setting for volume qos, "
"use 0 for unlimited"),
"integer",
minimum=0,
default=0)
self._set_property(
properties,
"DF:write_iops_max",
"Datera QoS Max IOPS Write",
_("Max write iops setting for volume qos, "
"use 0 for unlimited"),
"integer",
minimum=0,
default=0)
self._set_property(
properties,
"DF:total_iops_max",
"Datera QoS Max IOPS Total",
_("Max total iops setting for volume qos, "
"use 0 for unlimited"),
"integer",
minimum=0,
default=0)
# ###### End QoS Settings ###### #
return properties, 'DF'
| create_volume |
proto3.pb.go | // Code generated by protoc-gen-gogo.
// source: proto3.proto
// DO NOT EDIT!
/*
Package vanity is a generated protocol buffer package.
It is generated from these files:
proto3.proto
It has these top-level messages:
Aproto3
*/
package vanity
import proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto"
import fmt "fmt"
import math "math"
import strings "strings"
import github_com_gogo_protobuf_proto "gx/ipfs/QmZ4Qi3GaRbjcx28Sme5eMH7RQjGkt8wHxt2a65oLaeFEV/gogo-protobuf/proto"
import sort "sort"
import strconv "strconv"
import reflect "reflect"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
type Aproto3 struct {
B string `protobuf:"bytes,1,opt,name=B,proto3" json:"B,omitempty"`
}
func (m *Aproto3) Reset() { *m = Aproto3{} }
func (*Aproto3) ProtoMessage() {}
func init() {
proto.RegisterType((*Aproto3)(nil), "vanity.Aproto3")
}
func (this *Aproto3) Equal(that interface{}) bool {
if that == nil {
if this == nil {
return true
}
return false
}
that1, ok := that.(*Aproto3)
if !ok {
that2, ok := that.(Aproto3)
if ok {
that1 = &that2
} else {
return false
}
}
if that1 == nil {
if this == nil {
return true
}
return false
} else if this == nil {
return false
}
if this.B != that1.B {
return false
}
return true
}
func (this *Aproto3) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 5)
s = append(s, "&vanity.Aproto3{")
s = append(s, "B: "+fmt.Sprintf("%#v", this.B)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func valueToGoStringProto3(v interface{}, typ string) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
}
func extensionToGoStringProto3(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
if e == nil {
return "nil"
}
s := "map[int32]proto.Extension{"
keys := make([]int, 0, len(e))
for k := range e {
keys = append(keys, int(k))
}
sort.Ints(keys)
ss := []string{}
for _, k := range keys {
ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
}
s += strings.Join(ss, ",") + "}"
return s
}
func (m *Aproto3) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *Aproto3) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.B) > 0 {
data[i] = 0xa
i++
i = encodeVarintProto3(data, i, uint64(len(m.B)))
i += copy(data[i:], m.B)
}
return i, nil
}
func encodeFixed64Proto3(data []byte, offset int, v uint64) int |
func encodeFixed32Proto3(data []byte, offset int, v uint32) int {
data[offset] = uint8(v)
data[offset+1] = uint8(v >> 8)
data[offset+2] = uint8(v >> 16)
data[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintProto3(data []byte, offset int, v uint64) int {
for v >= 1<<7 {
data[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
data[offset] = uint8(v)
return offset + 1
}
func (m *Aproto3) Size() (n int) {
var l int
_ = l
l = len(m.B)
if l > 0 {
n += 1 + l + sovProto3(uint64(l))
}
return n
}
func sovProto3(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozProto3(x uint64) (n int) {
return sovProto3(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *Aproto3) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&Aproto3{`,
`B:` + fmt.Sprintf("%v", this.B) + `,`,
`}`,
}, "")
return s
}
func valueToStringProto3(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *Aproto3) Unmarshal(data []byte) error {
l := len(data)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProto3
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Aproto3: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Aproto3: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field B", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowProto3
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthProto3
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.B = string(data[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipProto3(data[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthProto3
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipProto3(data []byte) (n int, err error) {
l := len(data)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowProto3
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowProto3
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if data[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowProto3
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthProto3
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowProto3
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipProto3(data[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthProto3 = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowProto3 = fmt.Errorf("proto: integer overflow")
)
| {
data[offset] = uint8(v)
data[offset+1] = uint8(v >> 8)
data[offset+2] = uint8(v >> 16)
data[offset+3] = uint8(v >> 24)
data[offset+4] = uint8(v >> 32)
data[offset+5] = uint8(v >> 40)
data[offset+6] = uint8(v >> 48)
data[offset+7] = uint8(v >> 56)
return offset + 8
} |
checkout.styles.tsx | import styled from 'styled-components';
export const CheckoutContainer = styled.div`
width: 55%;
min-height: 90vh; | align-items: center;
margin: 50px auto 0;
`;
export const HeaderContainer = styled.div`
width: 100%;
padding: 10px 0;
display: flex;
justify-content: space-between;
border-bottom: 1px solid darkgrey;
`;
export const HeaderBlockContainer = styled.div`
text-transform: capitalize;
width: 23%;
&:last-child {
width: 8%;
}
`;
export const TotalContainer = styled.div`
margin-top: 30px;
margin-left: auto;
font-size: 36px;
text-transform: uppercase;
`;
export const StripeButtonContainer = styled.div`
margin-left: auto;
margin-top: 50px;
`;
export const WarningContainer = styled.div`
color: red;
text-align: center;
margin-top: 40px;
font-size: 24px;
`; | display: flex;
flex-direction: column; |
tiled.go | package nitro
import (
"image"
"image/color"
)
// Tiled is an image.Image whose pixels are stored as a sequence of 8x8 tiles.
// Since it is conceptually one-dimensional, its bounds may be undefined.
type Tiled struct {
Pix []uint8
Stride int // number of tiles per row
Rect image.Rectangle
Palette color.Palette
}
func (t *Tiled) ColorModel() color.Model { return t.Palette }
func (t *Tiled) Bounds() image.Rectangle { return t.Rect }
// Tile returns an image representing a portion of t.
// The upper left tile of the returned image will be the nth tile in t,
// and the tiles following the nth tile will fill the remaining width and
// height of the returned image from left to right, top to bottom.
// The returned value shares pixels with the original image.
func (t *Tiled) Tile(n, width, height int) *Tiled {
if n*64 >= len(t.Pix) {
return &Tiled{
Palette: t.Palette,
}
}
r := image.Rect(0, 0, width, height)
stride := (width + 7) / 8
return &Tiled{
Pix: t.Pix[n*64:],
Rect: r,
Stride: stride,
Palette: t.Palette,
}
}
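// For example (an illustrative sketch, not from the original source):
// t.Tile(4, 16, 8) returns a 16x8 image (2 tiles wide, 1 tile tall) whose
// upper-left tile is tile 4 of t, so it is backed by tiles 4 and 5.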
// PixOffset returns the index of Pix that corresponds to the pixel at (x, y).
func (t *Tiled) PixOffset(x, y int) int {
// TODO: try to get this under the inlining limit
x, y = x-t.Rect.Min.X, y-t.Rect.Min.Y
return (y/8*t.Stride+x/8)*64 + y%8*8 + x%8
}
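// Worked example (illustrative): with Stride = 2 (a 16-pixel-wide image) and
// Rect.Min at the origin, PixOffset(10, 3) = (3/8*2+10/8)*64 + 3%8*8 + 10%8
// = 1*64 + 24 + 2 = 90, i.e. pixel (2, 3) within tile 1.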
func (t *Tiled) ColorIndexAt(x, y int) uint8 {
if !image.Pt(x, y).In(t.Rect) {
return 0
}
i := t.PixOffset(x, y)
if i >= len(t.Pix) {
return 0
}
return t.Pix[i]
}
func (t *Tiled) SetColorIndex(x, y int, index uint8) {
if !image.Pt(x, y).In(t.Rect) {
return
}
i := t.PixOffset(x, y)
if i >= len(t.Pix) {
return
}
t.Pix[i] = index
}
func (t *Tiled) At(x, y int) color.Color {
if len(t.Palette) == 0 {
return nil
}
if !image.Pt(x, y).In(t.Rect) {
return t.Palette[0]
}
i := t.PixOffset(x, y)
if i >= len(t.Pix) {
return t.Palette[0]
}
return t.Palette[t.Pix[i]]
}
func (t *Tiled) Set(x, y int, c color.Color) {
if !image.Pt(x, y).In(t.Rect) {
return
}
i := t.PixOffset(x, y)
if i >= len(t.Pix) {
return
}
t.Pix[i] = uint8(t.Palette.Index(c))
}
| Pix: make([]uint8, r.Dx()*r.Dy()),
Rect: r,
Stride: r.Dx() / 8,
Palette: pal,
}
} | func NewTiled(r image.Rectangle, pal color.Palette) *Tiled {
return &Tiled{ |
machine_indices.rs | use crate::parser::ast::*;
use crate::arena::*;
use crate::atom_table::*;
use crate::fixtures::*;
use crate::forms::*;
use crate::instructions::*;
use crate::machine::loader::*;
use crate::machine::machine_state::*;
use crate::machine::streams::Stream;
use fxhash::FxBuildHasher;
use indexmap::IndexMap;
use std::cell::Cell;
use std::cmp::Ordering;
use std::collections::BTreeSet;
use std::ops::Deref;
use std::rc::Rc;
use crate::types::*;
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub(crate) struct OrderedOpDirKey(pub(crate) Atom, pub(crate) Fixity);
pub(crate) type OssifiedOpDir = IndexMap<(Atom, Fixity), (usize, Specifier)>;
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DBRef {
NamedPred(Atom, usize),
Op(Atom, Fixity, TypedArenaPtr<OssifiedOpDir>),
}
// 7.2
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum TermOrderCategory {
Variable,
FloatingPoint,
Integer,
Atom,
Compound,
}
impl PartialEq<Ref> for HeapCellValue {
fn eq(&self, r: &Ref) -> bool {
self.as_var() == Some(*r)
}
}
impl PartialOrd<Ref> for HeapCellValue {
fn partial_cmp(&self, r: &Ref) -> Option<Ordering> {
read_heap_cell!(*self,
(HeapCellValueTag::StackVar, s1) => {
match r.get_tag() {
RefTag::StackCell => {
let s2 = r.get_value() as usize;
s1.partial_cmp(&s2)
}
_ => Some(Ordering::Greater),
}
}
(HeapCellValueTag::AttrVar | HeapCellValueTag::Var, h1) => {
// _ if self.is_ref() => {
// let h1 = self.get_value();
match r.get_tag() {
RefTag::StackCell => Some(Ordering::Less),
_ => {
let h2 = r.get_value() as usize;
h1.partial_cmp(&h2)
}
}
}
_ => {
None
}
)
}
}
#[derive(Debug, Clone, Copy, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub enum IndexPtr {
DynamicUndefined, // a predicate, declared as dynamic, whose location in code is as yet undefined.
DynamicIndex(usize),
Index(usize),
Undefined,
}
#[derive(Debug, Clone, Ord, PartialOrd, Eq, PartialEq)]
pub struct CodeIndex(pub(crate) Rc<Cell<IndexPtr>>);
impl Deref for CodeIndex {
type Target = Cell<IndexPtr>;
#[inline]
fn deref(&self) -> &Self::Target {
self.0.deref()
}
}
impl CodeIndex {
#[inline]
pub(super) fn new(ptr: IndexPtr) -> Self {
CodeIndex(Rc::new(Cell::new(ptr)))
}
#[inline]
pub(crate) fn is_undefined(&self) -> bool {
match self.0.get() {
IndexPtr::Undefined => true, // | &IndexPtr::DynamicUndefined => true,
_ => false,
}
}
pub(crate) fn local(&self) -> Option<usize> {
match self.0.get() {
IndexPtr::Index(i) => Some(i),
IndexPtr::DynamicIndex(i) => Some(i),
_ => None,
}
}
}
impl Default for CodeIndex {
fn default() -> Self {
CodeIndex(Rc::new(Cell::new(IndexPtr::Undefined)))
}
}
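// A minimal usage sketch (hypothetical, not taken from the original source):
// the shared Rc<Cell<..>> means every clone observes an in-place patch of the
// predicate's code location.
//
//     let idx = CodeIndex::default();       // starts as IndexPtr::Undefined
//     let shared = idx.clone();             // same underlying Cell
//     idx.set(IndexPtr::Index(42));         // set via Deref to the Cell
//     assert_eq!(shared.local(), Some(42));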
pub(crate) type HeapVarDict = IndexMap<Rc<String>, HeapCellValue, FxBuildHasher>;
pub(crate) type AllocVarDict = IndexMap<Rc<String>, VarData, FxBuildHasher>;
pub(crate) type GlobalVarDir = IndexMap<Atom, (Ball, Option<HeapCellValue>), FxBuildHasher>;
pub(crate) type StreamAliasDir = IndexMap<Atom, Stream, FxBuildHasher>;
pub(crate) type StreamDir = BTreeSet<Stream>;
pub(crate) type MetaPredicateDir = IndexMap<PredicateKey, Vec<MetaSpec>, FxBuildHasher>;
pub(crate) type ExtensiblePredicates = IndexMap<PredicateKey, PredicateSkeleton, FxBuildHasher>;
pub(crate) type LocalExtensiblePredicates =
IndexMap<(CompilationTarget, PredicateKey), LocalPredicateSkeleton, FxBuildHasher>;
pub(crate) type CodeDir = IndexMap<PredicateKey, CodeIndex, FxBuildHasher>;
#[derive(Debug)]
pub struct IndexStore {
pub(super) code_dir: CodeDir,
pub(super) extensible_predicates: ExtensiblePredicates,
pub(super) local_extensible_predicates: LocalExtensiblePredicates,
pub(super) global_variables: GlobalVarDir,
pub(super) meta_predicates: MetaPredicateDir,
pub(super) modules: ModuleDir,
pub(super) op_dir: OpDir,
pub(super) streams: StreamDir,
pub(super) stream_aliases: StreamAliasDir,
}
impl IndexStore {
pub(crate) fn get_predicate_skeleton_mut(
&mut self,
compilation_target: &CompilationTarget,
key: &PredicateKey,
) -> Option<&mut PredicateSkeleton> {
match compilation_target {
CompilationTarget::User => self.extensible_predicates.get_mut(key),
CompilationTarget::Module(ref module_name) => {
if let Some(module) = self.modules.get_mut(module_name) {
module.extensible_predicates.get_mut(key)
} else {
None
}
}
}
}
pub(crate) fn get_predicate_skeleton(
&self,
compilation_target: &CompilationTarget,
key: &PredicateKey,
) -> Option<&PredicateSkeleton> {
match compilation_target {
CompilationTarget::User => self.extensible_predicates.get(key),
CompilationTarget::Module(ref module_name) => {
if let Some(module) = self.modules.get(module_name) {
module.extensible_predicates.get(key)
} else {
None
}
}
}
}
pub(crate) fn get_local_predicate_skeleton_mut(
&mut self,
mut src_compilation_target: CompilationTarget,
local_compilation_target: CompilationTarget,
listing_src_file_name: Option<Atom>,
key: PredicateKey,
) -> Option<&mut LocalPredicateSkeleton> {
if let Some(filename) = listing_src_file_name {
src_compilation_target = CompilationTarget::Module(filename);
}
match src_compilation_target {
CompilationTarget::User => self
.local_extensible_predicates
.get_mut(&(local_compilation_target, key)),
CompilationTarget::Module(module_name) => {
if let Some(module) = self.modules.get_mut(&module_name) {
module
.local_extensible_predicates
.get_mut(&(local_compilation_target, key))
} else {
None
}
}
}
}
pub(crate) fn get_local_predicate_skeleton(
&self,
mut src_compilation_target: CompilationTarget,
local_compilation_target: CompilationTarget,
listing_src_file_name: Option<Atom>,
key: PredicateKey,
) -> Option<&LocalPredicateSkeleton> {
if let Some(filename) = listing_src_file_name |
match src_compilation_target {
CompilationTarget::User => self
.local_extensible_predicates
.get(&(local_compilation_target, key)),
CompilationTarget::Module(module_name) => {
if let Some(module) = self.modules.get(&module_name) {
module
.local_extensible_predicates
.get(&(local_compilation_target, key))
} else {
None
}
}
}
}
pub(crate) fn remove_predicate_skeleton(
&mut self,
compilation_target: &CompilationTarget,
key: &PredicateKey,
) -> Option<PredicateSkeleton> {
match compilation_target {
CompilationTarget::User => self.extensible_predicates.remove(key),
CompilationTarget::Module(ref module_name) => {
if let Some(module) = self.modules.get_mut(module_name) {
module.extensible_predicates.remove(key)
} else {
None
}
}
}
}
pub(crate) fn get_predicate_code_index(
&self,
name: Atom,
arity: usize,
module: Atom,
) -> Option<CodeIndex> {
if module == atom!("user") {
match ClauseType::from(name, arity) {
ClauseType::Named(arity, name, _) => self.code_dir.get(&(name, arity)).cloned(),
_ => None,
}
} else {
self.modules
.get(&module)
.and_then(|module| match ClauseType::from(name, arity) {
ClauseType::Named(arity, name, _) => {
module.code_dir.get(&(name, arity)).cloned()
}
_ => None,
})
}
}
pub(crate) fn get_meta_predicate_spec(
&self,
name: Atom,
arity: usize,
compilation_target: &CompilationTarget,
) -> Option<&Vec<MetaSpec>> {
match compilation_target {
CompilationTarget::User => self.meta_predicates.get(&(name, arity)),
CompilationTarget::Module(ref module_name) => match self.modules.get(module_name) {
Some(ref module) => module
.meta_predicates
.get(&(name.clone(), arity))
.or_else(|| self.meta_predicates.get(&(name, arity))),
None => self.meta_predicates.get(&(name, arity)),
},
}
}
pub(crate) fn is_dynamic_predicate(
&self,
module_name: Atom,
key: PredicateKey,
) -> bool {
match module_name {
atom!("user") => self
.extensible_predicates
.get(&key)
.map(|skeleton| skeleton.core.is_dynamic)
.unwrap_or(false),
_ => match self.modules.get(&module_name) {
Some(ref module) => module
.extensible_predicates
.get(&key)
.map(|skeleton| skeleton.core.is_dynamic)
.unwrap_or(false),
None => false,
},
}
}
#[inline]
pub(super) fn new() -> Self {
index_store!(
CodeDir::with_hasher(FxBuildHasher::default()),
default_op_dir(),
ModuleDir::with_hasher(FxBuildHasher::default())
)
}
}
| {
src_compilation_target = CompilationTarget::Module(filename);
} |
test_cli.py | """
Tests for `kolibri.utils.cli` module.
"""
from __future__ import absolute_import
from __future__ import print_function
import logging
import os
import tempfile
import pytest
from django.db.utils import OperationalError
from mock import patch
import kolibri
from kolibri.plugins.utils import autoremove_unavailable_plugins
from kolibri.utils import cli
from kolibri.utils import options
logger = logging.getLogger(__name__)
LOG_LOGGER = []
def log_logger(logger_instance, LEVEL, msg, args, **kwargs):
"""
    Monkeypatching for logging.Logger._log to scoop up log messages so we can
    test that something specific was logged.
"""
LOG_LOGGER.append((LEVEL, msg))
# Call the original function
logger_instance.__log(LEVEL, msg, args, **kwargs)
def activate_log_logger(monkeypatch):
"""
Activates logging everything to ``LOG_LOGGER`` with the monkeypatch pattern
of py.test (test accepts a ``monkeypatch`` argument)
"""
monkeypatch.setattr(logging.Logger, "__log", logging.Logger._log, raising=False)
monkeypatch.setattr(logging.Logger, "_log", log_logger)
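# Usage sketch (a hypothetical test, not part of this suite): after activating
# the logger, assertions can be made against the captured (level, msg) pairs.
#
#     def test_warning_is_logged(monkeypatch):
#         activate_log_logger(monkeypatch)
#         logger.warning("something specific")
#         assert any("something specific" in msg for _level, msg in LOG_LOGGER)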
@pytest.fixture
def plugins():
|
def test_bogus_plugin_autoremove(plugins):
"""
Checks that a plugin is auto-removed when it cannot be imported
"""
plugin_name = "giraffe.horse"
plugins.config["INSTALLED_PLUGINS"].add(plugin_name)
plugins.config.save()
autoremove_unavailable_plugins()
assert plugin_name not in plugins.config["INSTALLED_PLUGINS"]
def test_bogus_plugin_autoremove_no_path(plugins):
"""
Checks that a plugin without a dotted path is also auto-removed
"""
plugin_name = "giraffehorse"
plugins.config["INSTALLED_PLUGINS"].add(plugin_name)
plugins.config.save()
autoremove_unavailable_plugins()
assert plugin_name not in plugins.config["INSTALLED_PLUGINS"]
def test_bogus_plugin_disable(plugins):
installed_apps_before = plugins.config["INSTALLED_PLUGINS"].copy()
disabled_apps_before = plugins.config["DISABLED_PLUGINS"].copy()
try:
cli.disable.callback(("i_do_not_exist",), False)
except Exception:
pass
assert installed_apps_before == plugins.config["INSTALLED_PLUGINS"]
assert disabled_apps_before == plugins.config["DISABLED_PLUGINS"]
def test_plugin_cannot_be_imported_disable(plugins):
"""
A plugin may be in plugins.config['INSTALLED_PLUGINS'] but broken or uninstalled
"""
plugin_name = "giraffe.horse"
plugins.config["INSTALLED_PLUGINS"].add(plugin_name)
plugins.config.save()
try:
cli.disable.callback((plugin_name,), False)
except Exception:
pass
assert plugin_name not in plugins.config["INSTALLED_PLUGINS"]
# We also don't want to endlessly add cruft to the disabled apps
assert plugin_name not in plugins.config["DISABLED_PLUGINS"]
def test_real_plugin_disable(plugins):
installed_apps_before = plugins.config["INSTALLED_PLUGINS"].copy()
test_plugin = "kolibri.plugins.media_player"
assert test_plugin in installed_apps_before
# Because RIP example plugin
cli.disable.callback((test_plugin,), False)
assert test_plugin not in plugins.config["INSTALLED_PLUGINS"]
assert test_plugin in plugins.config["DISABLED_PLUGINS"]
def test_real_plugin_disable_twice(plugins):
installed_apps_before = plugins.config["INSTALLED_PLUGINS"].copy()
test_plugin = "kolibri.plugins.media_player"
assert test_plugin in installed_apps_before
cli.disable.callback((test_plugin,), False)
assert test_plugin not in plugins.config.ACTIVE_PLUGINS
assert test_plugin not in plugins.config["INSTALLED_PLUGINS"]
assert test_plugin in plugins.config["DISABLED_PLUGINS"]
installed_apps_before = plugins.config["INSTALLED_PLUGINS"].copy()
cli.disable.callback((test_plugin,), False)
assert test_plugin not in plugins.config.ACTIVE_PLUGINS
assert test_plugin not in plugins.config["INSTALLED_PLUGINS"]
assert test_plugin in plugins.config["DISABLED_PLUGINS"]
def test_plugin_with_no_plugin_class(plugins):
"""
Expected behavior is that nothing blows up with exceptions, user just gets
a warning and nothing is enabled or changed in the configuration.
"""
# For fun, we pass in a system library
installed_apps_before = plugins.config["INSTALLED_PLUGINS"].copy()
try:
cli.enable.callback(("os.path",), False)
except Exception:
pass
assert installed_apps_before == plugins.config["INSTALLED_PLUGINS"]
@pytest.mark.django_db
def test_kolibri_listen_port_env(monkeypatch):
"""
Starts and stops the server, mocking the actual server.start()
Checks that the correct fallback port is used from the environment.
"""
with patch("django.core.management.call_command"), patch(
"kolibri.utils.server.start"
) as start:
from kolibri.utils import server
def start_mock(port, *args, **kwargs):
assert port == test_port
try:
os.remove(server.STARTUP_LOCK)
except OSError:
pass
activate_log_logger(monkeypatch)
start.side_effect = start_mock
test_port = 1234
os.environ["KOLIBRI_HTTP_PORT"] = str(test_port)
        # force a reload of conf.OPTIONS so the environment variable will be read in
from kolibri.utils import conf
conf.OPTIONS.update(options.read_options_file(conf.KOLIBRI_HOME))
cli.start.callback(test_port, False)
with pytest.raises(SystemExit) as excinfo:
cli.stop.callback()
assert excinfo.code == 0
# Stop the server AGAIN, asserting that we can call the stop command
# on an already stopped server and will be gracefully informed about
# it.
with pytest.raises(SystemExit) as excinfo:
cli.stop.callback()
assert excinfo.code == 0
assert "Already stopped" in LOG_LOGGER[-1][1]
def status_starting_up():
raise server.NotRunning(server.STATUS_STARTING_UP)
# Ensure that if a server is reported to be 'starting up', it doesn't
# get killed while doing that.
monkeypatch.setattr(server, "get_status", status_starting_up)
with pytest.raises(SystemExit) as excinfo:
cli.stop.callback()
assert excinfo.code == server.STATUS_STARTING_UP
assert "Not stopped" in LOG_LOGGER[-1][1]
@pytest.mark.django_db
@patch("kolibri.utils.cli.get_version", return_value="")
@patch("kolibri.utils.cli.update")
@patch("kolibri.utils.cli.plugin.callback")
@patch("kolibri.core.deviceadmin.utils.dbbackup")
def test_first_run(dbbackup, plugin, update, get_version):
"""
Tests that the first_run() function performs as expected
"""
cli.initialize()
update.assert_called_once()
dbbackup.assert_not_called()
# Check that it got called for each default plugin
from kolibri import plugins
assert set(plugins.config["INSTALLED_PLUGINS"]) == set(plugins.DEFAULT_PLUGINS)
@pytest.mark.django_db
@patch("kolibri.utils.cli.get_version", return_value="0.0.1")
@patch("kolibri.utils.cli.update")
def test_update(update, get_version):
"""
Tests that update() function performs as expected
"""
cli.initialize()
update.assert_called_once()
@pytest.mark.django_db
@patch("kolibri.utils.cli.get_version", return_value="0.0.1")
def test_update_exits_if_running(get_version):
"""
    Tests that the update step exits if Kolibri is already running
"""
with patch("kolibri.utils.cli.server.get_status"):
try:
cli.initialize()
pytest.fail("Update did not exit when Kolibri was already running")
except SystemExit:
pass
@pytest.mark.django_db
def test_version_updated():
"""
Tests our db backup logic: version_updated gets any change, backup gets only non-dev changes
"""
assert cli.version_updated("0.10.0", "0.10.1")
assert not cli.version_updated("0.10.0", "0.10.0")
assert not cli.should_back_up("0.10.0-dev0", "")
assert not cli.should_back_up("0.10.0-dev0", "0.10.0")
assert not cli.should_back_up("0.10.0", "0.10.0-dev0")
assert not cli.should_back_up("0.10.0-dev0", "0.10.0-dev0")
@pytest.mark.django_db
@patch("kolibri.utils.cli.get_version", return_value=kolibri.__version__)
@patch("kolibri.utils.cli.update")
@patch("kolibri.core.deviceadmin.utils.dbbackup")
def test_update_no_version_change(dbbackup, update, get_version):
"""
Tests that when the version doesn't change, we are not doing things we
shouldn't
"""
cli.initialize()
update.assert_not_called()
dbbackup.assert_not_called()
def test_cli_usage():
# Test the -h
with pytest.raises(SystemExit) as excinfo:
cli.main("-h")
assert excinfo.code == 0
with pytest.raises(SystemExit) as excinfo:
cli.main("--version")
assert excinfo.code == 0
@patch("kolibri.utils.cli.click.echo")
def test_list_plugins(echo_mock, plugins):
cli.list.callback()
test_plugin = "kolibri.plugins.media_player"
    assert any(
map(
lambda x: test_plugin in x[0] and "ENABLED" in x[0],
echo_mock.call_args_list,
)
)
@patch("kolibri.utils.cli.click.echo")
def test_list_plugins_disabled(echo_mock, plugins):
    test_plugin = "kolibri.plugins.media_player"
    cli.disable.callback((test_plugin,), False)
    cli.list.callback()
    assert any(
map(
lambda x: test_plugin in x[0] and "DISABLED" in x[0],
echo_mock.call_args_list,
)
)
@patch("kolibri.utils.cli._migrate_databases")
@patch("kolibri.utils.cli.version_updated")
def test_migrate_if_unmigrated(version_updated, _migrate_databases):
# No matter what, ensure that version_updated returns False
version_updated.return_value = False
from morango.models import InstanceIDModel
with patch.object(
InstanceIDModel, "get_or_create_current_instance"
) as get_or_create_current_instance:
get_or_create_current_instance.side_effect = OperationalError("Test")
cli.initialize()
_migrate_databases.assert_called_once()
| from kolibri import plugins
_, config_file = tempfile.mkstemp(suffix="json")
old_config_file = plugins.conf_file
plugins.conf_file = config_file
plugins.config.set_defaults()
yield plugins
plugins.conf_file = old_config_file |
Nearest Neibour Search.py | import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import voronoi_plot_2d, Voronoi, KDTree
import pandas as pd
'''
Logical intuition: we cannot use a pairwise linear scan because its O(n^2) time
complexity is far too slow for a large list, and sorting does not help in two
dimensions either.
The chosen approach for the 2D case is therefore a Voronoi-diagram-based point
location query. Constructing the Voronoi diagram is implemented with scipy;
scipy's implementation is robust, takes O(n log n) time in the d=2 case, and
uses O(n) space. Linear space is good, since the question suggests a low
memory buffer. The point location queries are then answered with a KD tree in
O(log n) time per query, as an ANN (approximate nearest neighbour) approach;
the intuition behind not using an exact solution is to keep sublinear query
time with methods that generalise to the d > 2 case.
'''
class ApproxNearestNeibours:
    @staticmethod
    def NearestSearch():
x = [.22, .2, .4, .44, .42, .61, .17, .2, .63, .66]
y = [.21, .43, .23, .41, .42, .31, .2, .17, .62, .65]
London = [0,0]
list1 = np.random.rand(10,2)
list2= list(zip(x,y))
        # Voronoi diagram
vor = Voronoi(list1)
#KD tree
tree = KDTree(list1)
        dists, ids = tree.query(list2)  # distance to, and index of, the nearest list1 point
#Plotting and annotating (blue points are list1)
fig,ax = plt.subplots(1,1)
voronoi_plot_2d(vor,ax, show_vertices=False)
ax.scatter(x,y,s=20,color='r', label ='list two points')
ax.scatter(0,0, s=100, color='black', label= 'London')
for i in range(0,len(x)):
ax.annotate(ids[i], (x[i], y[i]), size = 10)
plt.xlabel('X co-ordinate')
plt.ylabel('Y co-ordinate')
plt.legend()
plt.show()
print("Id of point in list 1 that is the nearest neibour of point in list 2:", ids)
ID_Of_List1_Node = ids
        '''
        What I could then do is write a script to push the results back out to
        whatever database the lists came from, appending a third column with the
        id of the list 1 item that is closest to the given list 2 item.
        '''
if __name__ == "__main__":
    ApproxNearestNeibours.NearestSearch()
from annoy import AnnoyIndex
import numpy as np
'''Extension tasks:
Can you generalise your code to n dimensions?
Yes, using the annoy library in Python. I can define a metric space over all
of the points in list 1 in n dimensions and then perform the same
KD-tree-style search in higher dimensionality. On testing, the annoy package
seems to generalise up to around d < 2000.
The point of the tree-based ANN search is to allow sublinear query time in the
d > 2 case: exact methods such as solving Kirkpatrick's point location data
structure cannot give sublinear query time in higher dimensions, and the cost
of constructing the Voronoi diagram also grows super-linearly there, so an
exact approach would run up against complexity-theoretic conjectures (the
strong exponential time hypothesis).
'''
class | :
    def __init__(self):
        self.f = 40  # number of dimensions the index operates in
        # Data will have n dimensions incoming
        list1 = np.random.rand(10, self.f)
        list2 = np.random.rand(10, self.f)
        t = AnnoyIndex(self.f, 'euclidean')  # length of item vector that will be indexed
        for i in range(len(list1)):
            t.add_item(i, list1[i])  # append the items of list 1 to the defined metric space
        t.build(10)  # 10 trees
        t.save('test.ann')
        u = AnnoyIndex(self.f, 'euclidean')
        u.load('test.ann')  # super fast, will just mmap the file
        for i in range(len(list2)):
            # query by vector, since the list 2 points are not in the index
            print("Nearest neighbour for each element in list2:", u.get_nns_by_vector(list2[i], 1))
'''
As for the 2D case, these results could then be saved into a database to map
each point in the second list to its closest point in the first.
'''
'''
Extension task 2:
How would your answer change if your lists were too large to fit on a single machine?
For the 2D case the Voronoi approach cannot simply be chopped up to split the
map of England into smaller parts, because computing the Voronoi cells requires
all of the data in proximity; partitioning correctly would need a sort, which
has poor time complexity for huge datasets.
Therefore I would instead use the annoy package for all cases:
with annoy, the index over the metric space built from the first list of
points can be kept on disk, which deals with this exact issue and still lets
the calculations be performed. Additionally, the same index file can be
memory-mapped by multiple processes, so list 2 CAN be split into sections that
are loaded into memory in turn and the ANN KD search applied to find their
nearest neighbours in any dimension (see the sketch below).
Also, for extra joy, the work can be parallelised by routing the points of
list 2 to specific machines that each hold the index on disk.
'''
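# A minimal sketch of the sharded set-up described above; the function name,
# file path and dimension f=40 are assumptions for illustration. Build the
# list 1 index once and save it to disk, then each worker process mmaps the
# same file and queries only its own shard of list 2.
def query_shard(index_path, shard, f=40):
    u = AnnoyIndex(f, 'euclidean')
    u.load(index_path)  # mmap-backed, so the index is shared across processes
    return [u.get_nns_by_vector(vec, 1)[0] for vec in shard]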
'''
Extension task 3:
Would your answer change if the points were in a non-metric space?
Yes it would: in short, the question would lose all meaning. To summarise, a
metric space is defined as a set together with a metric on it. Thus any
non-metric space either has no set (so list 1 and list 2 are empty and there
are no points to calculate with), or has no metric, in which case the space
has no concept of distance (everything is everywhere).
'''
| annoy |
grouping.go | //
// Copyright 2020-2022 Sean C Foley
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package ipaddr
import (
"fmt"
"github.com/seancfoley/ipaddress-go/ipaddr/addrerr"
"math/big"
"sync/atomic"
"unsafe"
)
func createGrouping(divs []*AddressDivision, prefixLength PrefixLen, addrType addrType) *AddressDivisionGrouping {
grouping := &AddressDivisionGrouping{
addressDivisionGroupingInternal{
addressDivisionGroupingBase: addressDivisionGroupingBase{
divisions: standardDivArray(divs),
prefixLength: prefixLength,
addrType: addrType,
cache: &valueCache{},
},
},
}
assignStringCache(&grouping.addressDivisionGroupingBase, addrType)
return grouping
}
func createGroupingMultiple(divs []*AddressDivision, prefixLength PrefixLen, isMultiple bool) *AddressDivisionGrouping {
result := createGrouping(divs, prefixLength, zeroType)
result.isMult = isMultiple
return result
}
func createInitializedGrouping(divs []*AddressDivision, prefixLength PrefixLen) *AddressDivisionGrouping {
result := createGrouping(divs, prefixLength, zeroType)
result.initMultiple() // assigns isMult
return result
}
// NewDivisionGrouping creates an arbitrary grouping of divisions.
// To create address sections or addresses, use the constructors that are specific to the address version or type.
// The AddressDivision instances can be created with the NewDivision, NewRangeDivision, NewPrefixDivision or NewRangePrefixDivision functions.
func NewDivisionGrouping(divs []*AddressDivision, prefixLength PrefixLen) *AddressDivisionGrouping {
return createInitializedGrouping(divs, prefixLength)
}
var (
emptyBytes = []byte{}
)
type addressDivisionGroupingInternal struct {
addressDivisionGroupingBase
// TODO LATER refactor to support infiniband, which will involve multiple types.
	// But that will be a joint effort with Java and will wait until later.
}
func createSegmentArray(length int) []*AddressDivision {
return make([]*AddressDivision, length)
}
func (grouping *addressDivisionGroupingInternal) initMultiple() {
divCount := grouping.getDivisionCount()
for i := divCount - 1; i >= 0; i-- {
div := grouping.getDivision(i)
if div.isMultiple() {
grouping.isMult = true
return
}
}
return
}
func (grouping *addressDivisionGroupingInternal) getDivArray() standardDivArray {
if divsArray := grouping.divisions; divsArray != nil {
return divsArray.(standardDivArray)
}
return nil
}
// getDivision returns the division or panics if the index is negative or too large
func (grouping *addressDivisionGroupingInternal) getDivision(index int) *AddressDivision {
return grouping.getDivArray()[index]
}
// getDivisionsInternal returns the divisions slice, only to be used internally
func (grouping *addressDivisionGroupingInternal) getDivisionsInternal() []*AddressDivision {
return grouping.getDivArray()
}
func (grouping *addressDivisionGroupingInternal) getDivisionCount() int {
if divArray := grouping.getDivArray(); divArray != nil {
return divArray.getDivisionCount()
}
return 0
}
func (grouping *addressDivisionGroupingInternal) forEachSubDivision(start, end int, target func(index int, div *AddressDivision), targetLen int) (count int) {
divArray := grouping.getDivArray()
if divArray != nil {
// if not enough space in target, adjust
if targetEnd := start + targetLen; end > targetEnd {
end = targetEnd
}
divArray = divArray[start:end]
for i, div := range divArray {
target(i, div)
}
}
return len(divArray)
}
func adjust1To1StartIndices(sourceStart, sourceEnd, sourceCount, targetCount int) (newSourceStart, newSourceEnd, newTargetStart int) {
// both sourceCount and targetCount are lengths of their respective slices, so never negative
targetStart := 0
if sourceStart < 0 {
targetStart -= sourceStart
sourceStart = 0
if targetStart > targetCount || targetStart < 0 /* overflow */ {
targetStart = targetCount
}
} else if sourceStart > sourceCount {
sourceStart = sourceCount
}
// how many to copy?
if sourceEnd > sourceCount { // end index exceeds available
sourceEnd = sourceCount
} else if sourceEnd < sourceStart {
sourceEnd = sourceStart
}
return sourceStart, sourceEnd, targetStart
}
func adjust1To1Indices(sourceStart, sourceEnd, sourceCount, targetCount int) (newSourceStart, newSourceEnd, newTargetStart int) {
var targetStart int
sourceStart, sourceEnd, targetStart = adjust1To1StartIndices(sourceStart, sourceEnd, sourceCount, targetCount)
if limitEnd := sourceStart + (targetCount - targetStart); sourceEnd > limitEnd {
sourceEnd = limitEnd
}
return sourceStart, sourceEnd, targetStart
}
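// Worked example (illustrative): adjust1To1Indices(-2, 5, 4, 10) yields
// (0, 4, 2). The negative sourceStart of -2 pushes the target start to 2,
// sourceEnd is clamped from 5 down to the source count 4, and the remaining
// target capacity (10 - 2 = 8) is not exceeded, so source[0:4] lines up with
// target[2:6].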
func adjustIndices(
startIndex, endIndex, sourceCount,
replacementStartIndex, replacementEndIndex, replacementSegmentCount int) (int, int, int, int) {
if startIndex < 0 {
startIndex = 0
} else if startIndex > sourceCount {
startIndex = sourceCount
}
if endIndex < startIndex {
endIndex = startIndex
} else if endIndex > sourceCount {
endIndex = sourceCount
}
if replacementStartIndex < 0 {
replacementStartIndex = 0
} else if replacementStartIndex > replacementSegmentCount {
replacementStartIndex = replacementSegmentCount
}
if replacementEndIndex < replacementStartIndex {
replacementEndIndex = replacementStartIndex
} else if replacementEndIndex > replacementSegmentCount {
replacementEndIndex = replacementSegmentCount
}
return startIndex, endIndex, replacementStartIndex, replacementEndIndex
}
// copySubDivisions copies the existing segments from the given start index until but not including the segment at the given end index,
// into the given slice, as much as can be fit into the slice, returning the number of segments copied
func (grouping *addressDivisionGroupingInternal) copySubDivisions(start, end int, divs []*AddressDivision) (count int) {
if divArray := grouping.getDivArray(); divArray != nil {
start, end, targetIndex := adjust1To1Indices(start, end, grouping.GetDivisionCount(), len(divs))
return divArray.copySubDivisions(start, end, divs[targetIndex:])
}
return
}
// copyDivisions copies the existing segments from the given start index until but not including the segment at the given end index,
// into the given slice, as much as can be fit into the slice, returning the number of segments copied
func (grouping *addressDivisionGroupingInternal) copyDivisions(divs []*AddressDivision) (count int) {
if divArray := grouping.getDivArray(); divArray != nil {
return divArray.copyDivisions(divs)
}
return
}
func (grouping *addressDivisionGroupingInternal) getSubDivisions(start, end int) []*AddressDivision {
divArray := grouping.getDivArray()
if divArray != nil {
return divArray.getSubDivisions(start, end)
} else if start != 0 || end != 0 {
panic("invalid subslice")
}
return make([]*AddressDivision, 0)
}
func (grouping *addressDivisionGroupingInternal) isAddressSection() bool {
return grouping != nil && grouping.matchesAddrSectionType()
}
func (grouping *addressDivisionGroupingInternal) compareSize(other StandardDivGroupingType) int { // the getCount() is optimized which is why we do not defer to the method in addressDivisionGroupingBase
if other == nil || other.ToDivGrouping() == nil {
// our size is 1 or greater, other 0
return 1
}
if !grouping.isMultiple() {
if other.IsMultiple() {
return -1
}
return 0
} else if !other.IsMultiple() {
return 1
}
return grouping.getCount().CmpAbs(other.GetCount())
}
func (grouping *addressDivisionGroupingInternal) getCount() *big.Int {
if !grouping.isMultiple() {
return bigOne()
} else if section := grouping.toAddressSection(); section != nil {
return section.GetCount()
}
return grouping.addressDivisionGroupingBase.getCount()
}
// GetPrefixCount returns the number of distinct prefix values in this item.
//
// The prefix length is given by GetPrefixLen.
//
// If this has a non-nil prefix length, returns the number of distinct prefix values.
//
// If this has a nil prefix length, returns the same value as GetCount.
func (grouping *addressDivisionGroupingInternal) GetPrefixCount() *big.Int {
if section := grouping.toAddressSection(); section != nil {
return section.GetPrefixCount()
}
return grouping.addressDivisionGroupingBase.GetPrefixCount()
}
// GetPrefixCountLen returns the number of distinct prefix values in this item for the given prefix length
func (grouping *addressDivisionGroupingInternal) GetPrefixCountLen(prefixLen BitCount) *big.Int {
if section := grouping.toAddressSection(); section != nil {
return section.GetPrefixCountLen(prefixLen)
}
return grouping.addressDivisionGroupingBase.GetPrefixCountLen(prefixLen)
}
func (grouping *addressDivisionGroupingInternal) getDivisionStrings() []string {
if grouping.hasNoDivisions() {
return []string{}
}
result := make([]string, grouping.GetDivisionCount())
for i := range result {
result[i] = grouping.getDivision(i).String()
}
return result
}
func (grouping *addressDivisionGroupingInternal) getSegmentStrings() []string {
if grouping.hasNoDivisions() {
return []string{}
}
result := make([]string, grouping.GetDivisionCount())
for i := range result {
result[i] = grouping.getDivision(i).GetWildcardString()
}
return result
}
func (grouping *addressDivisionGroupingInternal) toAddressDivisionGrouping() *AddressDivisionGrouping {
return (*AddressDivisionGrouping)(unsafe.Pointer(grouping))
}
func (grouping *addressDivisionGroupingInternal) toAddressSection() *AddressSection {
return grouping.toAddressDivisionGrouping().ToSectionBase()
}
func (grouping *addressDivisionGroupingInternal) matchesIPv6AddressType() bool {
return grouping.getAddrType().isIPv6() // no need to check segment count because addresses cannot be constructed with incorrect segment count
}
func (grouping *addressDivisionGroupingInternal) matchesIPv4AddressType() bool {
return grouping.getAddrType().isIPv4() // no need to check segment count because addresses cannot be constructed with incorrect segment count
}
func (grouping *addressDivisionGroupingInternal) matchesIPAddressType() bool {
return grouping.matchesIPSectionType() // no need to check segment count because addresses cannot be constructed with incorrect segment count (note the zero IPAddress has zero segments)
}
func (grouping *addressDivisionGroupingInternal) matchesMACAddressType() bool {
return grouping.getAddrType().isMAC()
}
// The adaptive zero grouping, produced by zero sections like IPv4AddressSection{} or AddressDivisionGrouping{}, can represent a zero-length section of any address type.
// It is not considered equal to constructions of specific zero length sections of groupings like NewIPv4Section(nil) which can only represent a zero-length section of a single address type.
func (grouping *addressDivisionGroupingInternal) matchesZeroGrouping() bool {
addrType := grouping.getAddrType()
return addrType.isNil() && grouping.hasNoDivisions()
}
func (grouping *addressDivisionGroupingInternal) matchesAddrSectionType() bool {
addrType := grouping.getAddrType()
// because there are no init() conversions for IPv6/IPV4/MAC sections, an implicitly zero-valued IPv6/IPV4/MAC or zero IP section has addr type nil
return addrType.isIP() || addrType.isMAC() || grouping.matchesZeroGrouping()
}
func (grouping *addressDivisionGroupingInternal) matchesIPv6SectionType() bool {
// because there are no init() conversions for IPv6 sections, an implicitly zero-valued IPV6 section has addr type nil
return grouping.getAddrType().isIPv6() || grouping.matchesZeroGrouping()
}
func (grouping *addressDivisionGroupingInternal) matchesIPv6v4MixedGroupingType() bool {
// because there are no init() conversions for IPv6v4MixedGrouping groupings, an implicitly zero-valued IPv6v4MixedGrouping has addr type nil
return grouping.getAddrType().isIPv6v4Mixed() || grouping.matchesZeroGrouping()
}
func (grouping *addressDivisionGroupingInternal) matchesIPv4SectionType() bool {
// because there are no init() conversions for IPV4 sections, an implicitly zero-valued IPV4 section has addr type nil
return grouping.getAddrType().isIPv4() || grouping.matchesZeroGrouping()
}
func (grouping *addressDivisionGroupingInternal) matchesIPSectionType() bool {
// because there are no init() conversions for IPv6 or IPV4 sections, an implicitly zero-valued IPv4, IPv6 or IP section has addr type nil
return grouping.getAddrType().isIP() || grouping.matchesZeroGrouping()
}
func (grouping *addressDivisionGroupingInternal) matchesMACSectionType() bool {
// because there are no init() conversions for MAC sections, an implicitly zero-valued MAC section has addr type nil
return grouping.getAddrType().isMAC() || grouping.matchesZeroGrouping()
}
// Format implements fmt.Formatter interface. It accepts the formats
// 'v' for the default address and section format (either the normalized or canonical string),
// 's' (string) for the same,
// 'b' (binary), 'o' (octal with 0 prefix), 'O' (octal with 0o prefix),
// 'd' (decimal), 'x' (lowercase hexadecimal), and
// 'X' (uppercase hexadecimal).
// Also supported are some of fmt's format flags for integral types.
// Sign control is not supported since addresses and sections are never negative.
// '#' for an alternate format is supported, which adds a leading zero for octal and,
// for hexadecimal, a leading "0x" or "0X" for "%#x" and "%#X" respectively.
// Also supported is specification of minimum digits precision, output field width,
// space or zero padding, and '-' for left or right justification.
func (grouping addressDivisionGroupingInternal) Format(state fmt.State, verb rune) {
if sect := grouping.toAddressSection(); sect != nil {
sect.Format(state, verb)
return
} else if mixed := grouping.toAddressDivisionGrouping().ToMixedIPv6v4(); mixed != nil {
mixed.Format(state, verb)
return
}
// divisions are printed like slices of *AddressDivision (which are Stringers) with division separated by spaces and enclosed in square brackets,
// sections are printed like addresses with segments separated by segment separators
grouping.defaultFormat(state, verb)
}
func (grouping addressDivisionGroupingInternal) defaultFormat(state fmt.State, verb rune) {
s := flagsFromState(state, verb)
_, _ = state.Write([]byte(fmt.Sprintf(s, grouping.initDivs().getDivArray())))
}
func (grouping *addressDivisionGroupingInternal) toString() string {
if sect := grouping.toAddressSection(); sect != nil {
return sect.ToNormalizedString()
}
return fmt.Sprint(grouping.initDivs().getDivArray())
}
func (grouping *addressDivisionGroupingInternal) initDivs() *addressDivisionGroupingInternal {
if grouping.divisions == nil {
return &zeroSection.addressDivisionGroupingInternal
}
return grouping
} | return grouping.prefixLength
}
// GetPrefixLen returns the prefix length, or nil if there is no prefix length.
//
// A prefix length indicates the number of bits in the initial part of the address item that comprises the prefix.
//
// A prefix is a part of the address item that is not specific to that address but common amongst a group of such items, such as a CIDR prefix block subnet.
func (grouping *addressDivisionGroupingInternal) GetPrefixLen() PrefixLen {
return grouping.getPrefixLen().copy()
}
func (grouping *addressDivisionGroupingInternal) isPrefixed() bool {
return grouping.prefixLength != nil
}
//TODO LATER eventually when supporting large divisions,
//might move containsPrefixBlock(prefixLen BitCount), containsSinglePrefixBlock(prefixLen BitCount),
// GetMinPrefixLenForBlock, and GetPrefixLenForSingleBlock into groupingBase code
// IsPrefixBlock, IsSinglePrefixBlock
// which looks straightforward since none deal with DivInt, instead they all call into divisionValues interface
// ContainsPrefixBlock returns whether the values of this item contains the block of values for the given prefix length.
//
// Unlike ContainsSinglePrefixBlock, whether there are multiple prefix values in this item for the given prefix length makes no difference.
//
// Use GetMinPrefixLenForBlock() to determine the smallest prefix length for which this method returns true.
func (grouping *addressDivisionGroupingInternal) ContainsPrefixBlock(prefixLen BitCount) bool {
if section := grouping.toAddressSection(); section != nil {
return section.ContainsPrefixBlock(prefixLen)
}
prefixLen = checkSubnet(grouping.toAddressDivisionGrouping(), prefixLen)
divisionCount := grouping.GetDivisionCount()
var prevBitCount BitCount
for i := 0; i < divisionCount; i++ {
division := grouping.getDivision(i)
bitCount := division.GetBitCount()
totalBitCount := bitCount + prevBitCount
if prefixLen < totalBitCount {
divPrefixLen := prefixLen - prevBitCount
if !division.containsPrefixBlock(divPrefixLen) {
return false
}
for i++; i < divisionCount; i++ {
division = grouping.getDivision(i)
if !division.IsFullRange() {
return false
}
}
return true
}
prevBitCount = totalBitCount
}
return true
}
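// For example (an illustrative sketch, using IPv4 sections): a section
// spanning 1.2.0.0 to 1.2.255.255 (the block of 1.2.0.0/16) returns true
// from ContainsPrefixBlock for every prefix length of 16 or more, since each
// /17, /18, ... block it touches is fully contained, and false below 16,
// since e.g. the block of 1.0.0.0/8 is not fully contained.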
// ContainsSinglePrefixBlock returns whether the values of this grouping contains a single prefix block for the given prefix length.
//
// This means there is only one prefix of the given length in this item, and this item contains the prefix block for that given prefix.
//
// Use GetPrefixLenForSingleBlock to determine whether there is a prefix length for which this method returns true.
func (grouping *addressDivisionGroupingInternal) ContainsSinglePrefixBlock(prefixLen BitCount) bool {
prefixLen = checkSubnet(grouping.toAddressDivisionGrouping(), prefixLen)
divisionCount := grouping.GetDivisionCount()
var prevBitCount BitCount
for i := 0; i < divisionCount; i++ {
division := grouping.getDivision(i)
bitCount := division.getBitCount()
totalBitCount := bitCount + prevBitCount
if prefixLen >= totalBitCount {
if division.isMultiple() {
return false
}
} else {
divPrefixLen := prefixLen - prevBitCount
if !division.ContainsSinglePrefixBlock(divPrefixLen) {
return false
}
for i++; i < divisionCount; i++ {
division = grouping.getDivision(i)
if !division.IsFullRange() {
return false
}
}
return true
}
prevBitCount = totalBitCount
}
return true
}
// IsPrefixBlock returns whether this address segment series has a prefix length and includes the block associated with its prefix length.
// If the prefix length matches the bit count, this returns true.
//
// This is different from ContainsPrefixBlock in that this method returns
// false if the series has no prefix length or a prefix length that differs from prefix lengths for which ContainsPrefixBlock returns true.
func (grouping *addressDivisionGroupingInternal) IsPrefixBlock() bool { //Note for any given prefix length you can compare with GetMinPrefixLenForBlock
prefLen := grouping.getPrefixLen()
return prefLen != nil && grouping.ContainsPrefixBlock(prefLen.bitCount())
}
// IsSinglePrefixBlock returns whether the range of values matches a single subnet block for the prefix length.
//
// What distinguishes this method with ContainsSinglePrefixBlock is that this method returns
// false if the series does not have a prefix length assigned to it,
// or a prefix length that differs from the prefix length for which ContainsSinglePrefixBlock returns true.
//
// It is similar to IsPrefixBlock but returns false when there are multiple prefixes.
func (grouping *addressDivisionGroupingInternal) IsSinglePrefixBlock() bool { //Note for any given prefix length you can compare with GetPrefixLenForSingleBlock
calc := func() bool {
prefLen := grouping.getPrefixLen()
return prefLen != nil && grouping.ContainsSinglePrefixBlock(prefLen.bitCount())
}
cache := grouping.cache
if cache == nil {
return calc()
}
res := cache.isSinglePrefixBlock
if res == nil {
if calc() {
res = &trueVal
// we can also set related cache fields
pref := grouping.getPrefixLen()
dataLoc := (*unsafe.Pointer)(unsafe.Pointer(&cache.equivalentPrefix))
equivPref := cachePrefix(pref.bitCount())
atomic.StorePointer(dataLoc, unsafe.Pointer(equivPref))
dataLoc = (*unsafe.Pointer)(unsafe.Pointer(&cache.minPrefix))
atomic.StorePointer(dataLoc, unsafe.Pointer(pref))
} else {
res = &falseVal
}
dataLoc := (*unsafe.Pointer)(unsafe.Pointer(&cache.isSinglePrefixBlock))
atomic.StorePointer(dataLoc, unsafe.Pointer(res))
}
return *res
}
// GetMinPrefixLenForBlock returns the smallest prefix length such that this grouping includes the block of all values for that prefix length.
//
// If the entire range can be described this way, then this method returns the same value as GetPrefixLenForSingleBlock.
//
// There may be a single prefix, or multiple possible prefix values in this item for the returned prefix length.
// Use GetPrefixLenForSingleBlock to avoid the case of multiple prefix values.
//
// If this grouping represents a single value, this returns the bit count.
func (grouping *addressDivisionGroupingInternal) GetMinPrefixLenForBlock() BitCount {
calc := func() BitCount {
count := grouping.GetDivisionCount()
totalPrefix := grouping.GetBitCount()
for i := count - 1; i >= 0; i-- {
div := grouping.getDivision(i)
segBitCount := div.getBitCount()
segPrefix := div.GetMinPrefixLenForBlock()
if segPrefix == segBitCount {
break
} else {
totalPrefix -= segBitCount
if segPrefix != 0 {
totalPrefix += segPrefix
break
}
}
}
return totalPrefix
}
cache := grouping.cache
if cache == nil {
return calc()
}
res := cache.minPrefix
if res == nil {
val := calc()
res = cacheBitCount(val)
dataLoc := (*unsafe.Pointer)(unsafe.Pointer(&cache.minPrefix))
atomic.StorePointer(dataLoc, unsafe.Pointer(res))
}
return res.bitCount()
}
// GetPrefixLenForSingleBlock returns a prefix length for which the range of this division grouping matches the block of addresses for that prefix.
//
// If no such prefix exists, GetPrefixLenForSingleBlock returns nil.
//
// If this division grouping represents a single value, returns the bit length.
func (grouping *addressDivisionGroupingInternal) GetPrefixLenForSingleBlock() PrefixLen {
calc := func() *PrefixLen {
count := grouping.GetDivisionCount()
var totalPrefix BitCount
for i := 0; i < count; i++ {
div := grouping.getDivision(i)
divPrefix := div.GetPrefixLenForSingleBlock()
if divPrefix == nil {
return cacheNilPrefix()
}
divPrefLen := divPrefix.bitCount()
totalPrefix += divPrefLen
if divPrefLen < div.GetBitCount() {
				// remaining segments must be full range or we return nil
for i++; i < count; i++ {
laterDiv := grouping.getDivision(i)
if !laterDiv.IsFullRange() {
return cacheNilPrefix()
}
}
}
}
return cachePrefix(totalPrefix)
}
cache := grouping.cache
if cache == nil {
return *calc()
}
res := cache.equivalentPrefix
if res == nil {
res = calc()
if *res == nil {
// we can also set related cache fields
dataLoc := (*unsafe.Pointer)(unsafe.Pointer(&cache.isSinglePrefixBlock))
atomic.StorePointer(dataLoc, unsafe.Pointer(&falseVal))
} else {
// we can also set related cache fields
var isSingleBlock *bool
if grouping.isPrefixed() && (*res).Equal(grouping.getPrefixLen()) {
isSingleBlock = &trueVal
} else {
isSingleBlock = &falseVal
}
dataLoc := (*unsafe.Pointer)(unsafe.Pointer(&cache.isSinglePrefixBlock))
atomic.StorePointer(dataLoc, unsafe.Pointer(isSingleBlock))
dataLoc = (*unsafe.Pointer)(unsafe.Pointer(&cache.minPrefix))
atomic.StorePointer(dataLoc, unsafe.Pointer(*res))
}
dataLoc := (*unsafe.Pointer)(unsafe.Pointer(&cache.equivalentPrefix))
atomic.StorePointer(dataLoc, unsafe.Pointer(res))
}
return *res
}
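// A companion sketch contrasting the two methods (same assumptions as the example above):
//
//	rng := ipaddr.NewIPAddressString("1.2.3-4.*").GetAddress()
//	_ = rng.GetPrefixLenForSingleBlock() // nil: prefix length 24 yields two prefix values, 1.2.3 and 1.2.4
//
//	blk := ipaddr.NewIPAddressString("1.2.0.0/16").GetAddress().ToPrefixBlock()
//	_ = blk.GetPrefixLenForSingleBlock() // 16: a single prefix value followed by all host values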
// GetValue returns the lowest individual address division grouping in this address division grouping as an integer value
func (grouping *addressDivisionGroupingInternal) GetValue() *big.Int {
if grouping.hasNoDivisions() {
return bigZero()
}
return bigZero().SetBytes(grouping.getBytes())
}
// GetUpperValue returns the highest individual address division grouping in this address division grouping as an integer value
func (grouping *addressDivisionGroupingInternal) GetUpperValue() *big.Int {
if grouping.hasNoDivisions() {
return bigZero()
}
return bigZero().SetBytes(grouping.getUpperBytes())
}
// Bytes returns the lowest individual division grouping in this grouping as a byte slice
func (grouping *addressDivisionGroupingInternal) Bytes() []byte {
if grouping.hasNoDivisions() {
return emptyBytes
}
cached := grouping.getBytes()
return cloneBytes(cached)
}
// UpperBytes returns the highest individual division grouping in this grouping as a byte slice
func (grouping *addressDivisionGroupingInternal) UpperBytes() []byte {
if grouping.hasNoDivisions() {
return emptyBytes
}
cached := grouping.getUpperBytes()
return cloneBytes(cached)
}
// CopyBytes copies the value of the lowest division grouping in the range into a byte slice
//
// If the value can fit in the given slice, the value is copied into that slice and a length-adjusted sub-slice is returned.
// Otherwise, a new slice is created and returned with the value.
//
// You can use GetByteCount to determine the required array length for the bytes.
func (grouping *addressDivisionGroupingInternal) CopyBytes(bytes []byte) []byte {
if grouping.hasNoDivisions() {
if bytes != nil {
return bytes
}
return emptyBytes
}
return getBytesCopy(bytes, grouping.getBytes())
}
// CopyUpperBytes copies the value of the highest division grouping in the range into a byte slice
//
// If the value can fit in the given slice, the value is copied into that slice and a length-adjusted sub-slice is returned.
// Otherwise, a new slice is created and returned with the value.
//
// You can use GetByteCount to determine the required array length for the bytes.
func (grouping *addressDivisionGroupingInternal) CopyUpperBytes(bytes []byte) []byte {
if grouping.hasNoDivisions() {
if bytes != nil {
return bytes
}
return emptyBytes
}
return getBytesCopy(bytes, grouping.getUpperBytes())
}
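// A short usage sketch for the copy methods (illustrative; `section` stands for any grouping value):
// sizing the buffer with GetByteCount guarantees the value fits, so no new slice is allocated.
//
//	buf := make([]byte, section.GetByteCount())
//	lower := section.CopyBytes(buf) // lower is a length-adjusted sub-slice of buf
//	// use a second buffer for the upper value, since each result aliases the slice it was given
//	upper := section.CopyUpperBytes(make([]byte, section.GetByteCount()))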
func (grouping *addressDivisionGroupingInternal) getBytes() (bytes []byte) {
bytes, _ = grouping.getCachedBytes(grouping.calcBytes)
return
}
func (grouping *addressDivisionGroupingInternal) getUpperBytes() (bytes []byte) {
_, bytes = grouping.getCachedBytes(grouping.calcBytes)
return
}
func (grouping *addressDivisionGroupingInternal) calcBytes() (bytes, upperBytes []byte) {
addrType := grouping.getAddrType()
divisionCount := grouping.GetDivisionCount()
isMultiple := grouping.isMultiple()
if addrType.isIPv4() || addrType.isMAC() {
bytes = make([]byte, divisionCount)
if isMultiple {
upperBytes = make([]byte, divisionCount)
} else {
upperBytes = bytes
}
for i := 0; i < divisionCount; i++ {
seg := grouping.getDivision(i).ToSegmentBase()
bytes[i] = byte(seg.GetSegmentValue())
if isMultiple {
upperBytes[i] = byte(seg.GetUpperSegmentValue())
}
}
} else if addrType.isIPv6() {
byteCount := divisionCount << 1
bytes = make([]byte, byteCount)
if isMultiple {
upperBytes = make([]byte, byteCount)
} else {
upperBytes = bytes
}
for i := 0; i < divisionCount; i++ {
seg := grouping.getDivision(i).ToSegmentBase()
byteIndex := i << 1
val := seg.GetSegmentValue()
bytes[byteIndex] = byte(val >> 8)
var upperVal SegInt
if isMultiple {
upperVal = seg.GetUpperSegmentValue()
upperBytes[byteIndex] = byte(upperVal >> 8)
}
nextByteIndex := byteIndex + 1
bytes[nextByteIndex] = byte(val)
if isMultiple {
upperBytes[nextByteIndex] = byte(upperVal)
}
}
} else {
byteCount := grouping.GetByteCount()
bytes = make([]byte, byteCount)
if isMultiple {
upperBytes = make([]byte, byteCount)
} else {
upperBytes = bytes
}
for k, byteIndex, bitIndex := divisionCount-1, byteCount-1, BitCount(8); k >= 0; k-- {
div := grouping.getDivision(k)
val := div.GetDivisionValue()
var upperVal DivInt
if isMultiple {
upperVal = div.GetUpperDivisionValue()
}
divBits := div.GetBitCount()
for divBits > 0 {
rbi := 8 - bitIndex
bytes[byteIndex] |= byte(val << uint(rbi))
val >>= uint(bitIndex)
if isMultiple {
upperBytes[byteIndex] |= byte(upperVal << uint(rbi))
upperVal >>= uint(bitIndex)
}
if divBits < bitIndex {
bitIndex -= divBits
break
} else {
divBits -= bitIndex
bitIndex = 8
byteIndex--
}
}
}
}
return
}
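// A worked example of the generic branch above (hypothetical division values):
// a 12-bit division holding 0xABC followed by a 4-bit division holding 0xD packs
// big-endian into 2 bytes. Working from the last division backwards: the 4-bit value
// fills the low nibble of the last byte (0x0D), then the 12-bit value fills the
// remaining high nibble (giving 0xCD) and carries its upper 8 bits into the first
// byte (0xAB), yielding bytes = [0xAB, 0xCD].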
// IsSequential returns whether the grouping represents a range of values that are sequential.
//
// Generally, this means that any division covering a range of values must be followed by divisions that are full range, covering all values.
func (grouping *addressDivisionGroupingInternal) IsSequential() bool {
count := grouping.GetDivisionCount()
if count > 1 {
for i := 0; i < count; i++ {
if grouping.getDivision(i).isMultiple() {
for i++; i < count; i++ {
if !grouping.getDivision(i).IsFullRange() {
return false
}
}
return true
}
}
}
return true
}
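// For illustration: in "1-2.3.4.5" the first segment spans two values while later
// segments are single-valued, so 1.3.4.5 and 2.3.4.5 are not sequential and
// IsSequential returns false; in "1.2.3.4-5" only the last segment spans a range,
// so the two values are sequential and IsSequential returns true.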
func (grouping *addressDivisionGroupingInternal) createNewDivisions(bitsPerDigit BitCount) ([]*AddressDivision, addrerr.IncompatibleAddressError) {
return grouping.createNewPrefixedDivisions(bitsPerDigit, nil)
}
func (grouping *addressDivisionGroupingInternal) createNewPrefixedDivisions(bitsPerDigit BitCount, networkPrefixLength PrefixLen) ([]*AddressDivision, addrerr.IncompatibleAddressError) {
bitCount := grouping.GetBitCount()
var bitDivs []BitCount
	// Here we divide into divisions, each with an exact number of digits.
	// Each digit takes bitsPerDigit bits, so the division bit-sizes are a multiple of bitsPerDigit until the last one.
	// For example, with octal digits (3 bits each):
	// ipv6 seg bit counts: 63, 63, 2
	// ipv4 seg bit counts: 30, 2
	largestBitCount := BitCount(64) // uint64, size of DivInt
	largestBitCount -= largestBitCount % bitsPerDigit // round down to a multiple of bitsPerDigit
for {
if bitCount <= largestBitCount {
mod := bitCount % bitsPerDigit
secondLast := bitCount - mod
if secondLast > 0 {
bitDivs = append(bitDivs, secondLast)
}
if mod > 0 {
bitDivs = append(bitDivs, mod)
}
break
} else {
bitCount -= largestBitCount
bitDivs = append(bitDivs, largestBitCount)
}
}
// at this point bitDivs has our division sizes
divCount := len(bitDivs)
divs := make([]*AddressDivision, divCount)
if divCount > 0 {
currentSegmentIndex := 0
seg := grouping.getDivision(currentSegmentIndex)
segLowerVal := seg.GetDivisionValue()
segUpperVal := seg.GetUpperDivisionValue()
segBits := seg.GetBitCount()
bitsSoFar := BitCount(0)
		// fill up our new divisions, one by one
for i := divCount - 1; i >= 0; i-- {
divBitSize := bitDivs[i]
originalDivBitSize := divBitSize
var divLowerValue, divUpperValue uint64
for {
if segBits >= divBitSize { // this segment fills the remainder of this division
diff := uint(segBits - divBitSize)
segBits = BitCount(diff)
segL := segLowerVal >> diff
segU := segUpperVal >> diff
// if the division upper bits are multiple, then the lower bits inserted must be full range
if divLowerValue != divUpperValue {
if segL != 0 || segU != ^(^uint64(0)<<uint(divBitSize)) {
return nil, &incompatibleAddressError{addressError: addressError{key: "ipaddress.error.invalid.joined.ranges"}}
}
}
divLowerValue |= segL
divUpperValue |= segU
shift := ^(^uint64(0) << diff)
segLowerVal &= shift
segUpperVal &= shift
// if a segment's bits are split into two divisions, and the bits going into the first division are multi-valued,
// then the bits going into the second division must be full range
if segL != segU {
if segLowerVal != 0 || segUpperVal != ^(^uint64(0)<<uint(segBits)) {
return nil, &incompatibleAddressError{addressError: addressError{key: "ipaddress.error.invalid.joined.ranges"}}
}
}
var segPrefixBits PrefixLen
if networkPrefixLength != nil {
segPrefixBits = getDivisionPrefixLength(originalDivBitSize, networkPrefixLength.bitCount()-bitsSoFar)
}
div := newRangePrefixDivision(divLowerValue, divUpperValue, segPrefixBits, originalDivBitSize)
divs[divCount-i-1] = div
if segBits == 0 && i > 0 {
//get next seg
currentSegmentIndex++
seg = grouping.getDivision(currentSegmentIndex)
segLowerVal = seg.getDivisionValue()
segUpperVal = seg.getUpperDivisionValue()
segBits = seg.getBitCount()
}
break
} else {
// if the division upper bits are multiple, then the lower bits inserted must be full range
if divLowerValue != divUpperValue {
if segLowerVal != 0 || segUpperVal != ^(^uint64(0)<<uint(segBits)) {
return nil, &incompatibleAddressError{addressError: addressError{key: "ipaddress.error.invalid.joined.ranges"}}
}
}
diff := uint(divBitSize - segBits)
divLowerValue |= segLowerVal << diff
divUpperValue |= segUpperVal << diff
divBitSize = BitCount(diff)
//get next seg
currentSegmentIndex++
seg = grouping.getDivision(currentSegmentIndex)
segLowerVal = seg.getDivisionValue()
segUpperVal = seg.getUpperDivisionValue()
segBits = seg.getBitCount()
}
}
bitsSoFar += originalDivBitSize
}
}
return divs, nil
}
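// A worked example of the bit division above: with bitsPerDigit == 3 (octal),
// largestBitCount rounds 64 down to 63. For a 32-bit IPv4 section, 32 <= 63 and
// 32 % 3 == 2, so bitDivs = [30, 2]. For a 128-bit IPv6 section the loop first
// peels off 63 twice, leaving 2, so bitDivs = [63, 63, 2], matching the comments
// at the top of this function.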
//// only needed for godoc / pkgsite
// GetBitCount returns the number of bits in each value comprising this address item
func (grouping addressDivisionGroupingInternal) GetBitCount() BitCount {
return grouping.addressDivisionGroupingBase.GetBitCount()
}
// GetByteCount returns the number of bytes required for each value comprising this address item,
// rounding up if the bit count is not a multiple of 8.
func (grouping addressDivisionGroupingInternal) GetByteCount() int {
return grouping.addressDivisionGroupingBase.GetByteCount()
}
// GetGenericDivision returns the division at the given index as a DivisionType implementation
func (grouping *addressDivisionGroupingInternal) GetGenericDivision(index int) DivisionType {
return grouping.addressDivisionGroupingBase.GetGenericDivision(index)
}
// GetDivisionCount returns the number of divisions in this grouping
func (grouping *addressDivisionGroupingInternal) GetDivisionCount() int {
return grouping.addressDivisionGroupingBase.GetDivisionCount()
}
// IsZero returns whether this grouping matches exactly the value of zero
func (grouping *addressDivisionGroupingInternal) IsZero() bool {
return grouping.addressDivisionGroupingBase.IsZero()
}
// IncludesZero returns whether this grouping includes the value of zero within its range
func (grouping *addressDivisionGroupingInternal) IncludesZero() bool {
return grouping.addressDivisionGroupingBase.IncludesZero()
}
// IsMax returns whether this grouping matches exactly the maximum possible value, the value whose bits are all ones
func (grouping *addressDivisionGroupingInternal) IsMax() bool {
return grouping.addressDivisionGroupingBase.IsMax()
}
// IncludesMax returns whether this grouping includes the max value, the value whose bits are all ones, within its range
func (grouping *addressDivisionGroupingInternal) IncludesMax() bool {
return grouping.addressDivisionGroupingBase.IncludesMax()
}
// IsFullRange returns whether this address item represents all possible values attainable by an address item of this type.
//
// This is true if and only if both IncludesZero and IncludesMax return true.
func (grouping *addressDivisionGroupingInternal) IsFullRange() bool {
return grouping.addressDivisionGroupingBase.IsFullRange()
}
// GetSequentialBlockIndex gets the minimal division index for which all following divisions are full-range blocks.
//
// The division at this index is not a full-range block unless all divisions are full-range.
// The division at this index and all following divisions form a sequential range.
// For the full grouping to be sequential, the preceding divisions must be single-valued.
func (grouping *addressDivisionGroupingInternal) GetSequentialBlockIndex() int {
return grouping.addressDivisionGroupingBase.GetSequentialBlockIndex()
}
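// For illustration: in "1.2.3-4.*" every division from index 3 onward is full range
// while the division at index 2 is not, so GetSequentialBlockIndex returns 2; the
// grouping is sequential only if the divisions before index 2 are single-valued
// (here they are: 1 and 2).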
// GetSequentialBlockCount provides the count of elements from the sequential block iterator, the minimal number of sequential address division groupings that comprise this address division grouping
func (grouping *addressDivisionGroupingInternal) GetSequentialBlockCount() *big.Int {
return grouping.addressDivisionGroupingBase.GetSequentialBlockCount()
}
// GetBlockCount returns the count of distinct values in the given number of initial (more significant) divisions.
func (grouping *addressDivisionGroupingInternal) GetBlockCount(divisionCount int) *big.Int {
return grouping.addressDivisionGroupingBase.GetBlockCount(divisionCount)
}
//// end needed for godoc / pkgsite
// AddressDivisionGrouping objects consist of a series of AddressDivision objects, each division containing a sequential range of values.
//
// AddressDivisionGrouping objects are immutable. This also makes them thread-safe.
//
// AddressDivision objects use uint64 to represent their values, so this places a cap on the size of the divisions in AddressDivisionGrouping.
//
// AddressDivisionGrouping objects are similar to address sections and addresses, except that groupings can have divisions of differing bit-length,
// including divisions that are not an exact number of bytes, whereas all segments in an address or address section must be equal bit size and an exact number of bytes.
type AddressDivisionGrouping struct {
addressDivisionGroupingInternal
}
// Compare returns a negative integer, zero, or a positive integer if this address division grouping is less than, equal, or greater than the given item.
// Any address item is comparable to any other. All address items use CountComparator to compare.
func (grouping *AddressDivisionGrouping) Compare(item AddressItem) int {
return CountComparator.Compare(grouping, item)
}
// CompareSize compares the counts of two address division groupings, the number of individual groupings represented.
//
// Rather than calculating counts with GetCount, there can be more efficient ways of comparing whether one grouping represents more individual address groupings than another.
//
// CompareSize returns a positive integer if this address division grouping has a larger count than the one given, 0 if they are the same, or a negative integer if the other has a larger count.
func (grouping *AddressDivisionGrouping) CompareSize(other StandardDivGroupingType) int {
if grouping == nil {
if other != nil && other.ToDivGrouping() != nil {
// we have size 0, other has size >= 1
return -1
}
return 0
}
return grouping.compareSize(other)
}
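// For illustration: a grouping whose final division spans two values represents two
// individual groupings, so comparing it against a single-valued grouping with
// CompareSize returns a positive integer, without materializing either count via GetCount.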
// GetCount returns the count of possible distinct values for this item.
// If not representing multiple values, the count is 1,
// unless this is a division grouping with no divisions, or an address section with no segments, in which case it is 0.
//
// Use IsMultiple if you simply want to know if the count is greater than 1.
func (grouping *AddressDivisionGrouping) GetCount() *big.Int {
if grouping == nil {
return bigZero()
}
return grouping.getCount()
}
// IsMultiple returns whether this grouping represents multiple values
func (grouping *AddressDivisionGrouping) IsMultiple() bool {
return grouping != nil && grouping.isMultiple()
}
// IsPrefixed returns whether this grouping has an associated prefix length
func (grouping *AddressDivisionGrouping) IsPrefixed() bool {
if grouping == nil {
return false
}
return grouping.isPrefixed()
}
// CopySubDivisions copies the existing divisions from the given start index until but not including the division at the given end index,
// into the given slice, as much as can be fit into the slice, returning the number of divisions copied
func (grouping *AddressDivisionGrouping) CopySubDivisions(start, end int, divs []*AddressDivision) (count int) {
return grouping.copySubDivisions(start, end, divs)
}
// CopyDivisions copies the existing divisions into the given slice,
// as much as can be fit into the slice, returning the number of divisions copied
func (grouping *AddressDivisionGrouping) CopyDivisions(divs []*AddressDivision) (count int) {
return grouping.copyDivisions(divs)
}
// GetDivisionStrings returns a slice containing each string returned from the String method of each division in the grouping.
func (grouping *AddressDivisionGrouping) GetDivisionStrings() []string {
if grouping == nil {
return nil
}
return grouping.getDivisionStrings()
}
// IsAdaptiveZero returns true if this is an adaptive zero grouping.
// The adaptive zero grouping, produced by zero sections like IPv4AddressSection{} or AddressDivisionGrouping{}, can represent a zero-length section of any address type.
// It is not considered equal to constructions of specific zero length sections or groupings like NewIPv4Section(nil) which can only represent a zero-length section of a single address type.
func (grouping *AddressDivisionGrouping) IsAdaptiveZero() bool {
return grouping != nil && grouping.matchesZeroGrouping()
}
// IsSectionBase returns true if this address division grouping originated as an address section. If so, use ToSectionBase to convert back to the section type.
func (grouping *AddressDivisionGrouping) IsSectionBase() bool {
return grouping != nil && grouping.isAddressSection()
}
// IsIP returns true if this address division grouping originated as an IPv4 or IPv6 section, or a zero-length IP section. If so, use ToIP to convert back to the IP-specific type.
func (grouping *AddressDivisionGrouping) IsIP() bool {
return grouping.ToSectionBase().IsIP()
}
// IsIPv4 returns true if this grouping originated as an IPv4 section. If so, use ToIPv4 to convert back to the IPv4-specific type.
func (grouping *AddressDivisionGrouping) IsIPv4() bool {
return grouping.ToSectionBase().IsIPv4()
}
// IsIPv6 returns true if this grouping originated as an IPv6 section. If so, use ToIPv6 to convert back to the IPv6-specific type.
func (grouping *AddressDivisionGrouping) IsIPv6() bool {
return grouping.ToSectionBase().IsIPv6()
}
// IsMixedIPv6v4 returns true if this grouping originated as a mixed IPv6-IPv4 grouping. If so, use ToMixedIPv6v4 to convert back to the more specific grouping type.
func (grouping *AddressDivisionGrouping) IsMixedIPv6v4() bool {
return grouping != nil && grouping.matchesIPv6v4MixedGroupingType()
}
// IsMAC returns true if this grouping originated as a MAC section. If so, use ToMAC to convert back to the MAC-specific type.
func (grouping *AddressDivisionGrouping) IsMAC() bool {
return grouping.ToSectionBase().IsMAC()
}
// ToSectionBase converts to an address section if this grouping originated as an address section.
// Otherwise, the result will be nil.
//
// ToSectionBase can be called with a nil receiver, enabling you to chain this method with methods that might return a nil pointer.
func (grouping *AddressDivisionGrouping) ToSectionBase() *AddressSection {
if grouping == nil || !grouping.isAddressSection() {
return nil
}
return (*AddressSection)(unsafe.Pointer(grouping))
}
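// A chaining sketch enabled by the nil-receiver support (illustrative):
//
//	if ipv4 := grouping.ToSectionBase().ToIPv4(); ipv4 != nil {
//		// grouping originated as an IPv4 section
//	}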
// ToMixedIPv6v4 converts to a mixed IPv6/4 address section if this grouping originated as a mixed IPv6/4 address section.
// Otherwise, the result will be nil.
//
// ToMixedIPv6v4 can be called with a nil receiver, enabling you to chain this method with methods that might return a nil pointer.
func (grouping *AddressDivisionGrouping) ToMixedIPv6v4() *IPv6v4MixedAddressGrouping {
if grouping.matchesIPv6v4MixedGroupingType() {
return (*IPv6v4MixedAddressGrouping)(grouping)
}
return nil
}
// ToIP converts to an IPAddressSection if this grouping originated as an IPv4 or IPv6 section, or an implicitly zero-valued IP section.
// If not, ToIP returns nil.
//
// ToIP can be called with a nil receiver, enabling you to chain this method with methods that might return a nil pointer.
func (grouping *AddressDivisionGrouping) ToIP() *IPAddressSection {
return grouping.ToSectionBase().ToIP()
}
// ToIPv6 converts to an IPv6AddressSection if this grouping originated as an IPv6 section.
// If not, ToIPv6 returns nil.
//
// ToIPv6 can be called with a nil receiver, enabling you to chain this method with methods that might return a nil pointer.
func (grouping *AddressDivisionGrouping) ToIPv6() *IPv6AddressSection {
return grouping.ToSectionBase().ToIPv6()
}
// ToIPv4 converts to an IPv4AddressSection if this grouping originated as an IPv4 section.
// If not, ToIPv4 returns nil.
//
// ToIPv4 can be called with a nil receiver, enabling you to chain this method with methods that might return a nil pointer.
func (grouping *AddressDivisionGrouping) ToIPv4() *IPv4AddressSection {
return grouping.ToSectionBase().ToIPv4()
}
// ToMAC converts to a MACAddressSection if this grouping originated as a MAC section.
// If not, ToMAC returns nil.
//
// ToMAC can be called with a nil receiver, enabling you to chain this method with methods that might return a nil pointer.
func (grouping *AddressDivisionGrouping) ToMAC() *MACAddressSection {
return grouping.ToSectionBase().ToMAC()
}
// ToDivGrouping is an identity method.
//
// ToDivGrouping can be called with a nil receiver, enabling you to chain this method with methods that might return a nil pointer.
func (grouping *AddressDivisionGrouping) ToDivGrouping() *AddressDivisionGrouping {
return grouping
}
// GetDivision returns the division at the given index.
func (grouping *AddressDivisionGrouping) GetDivision(index int) *AddressDivision {
return grouping.getDivision(index)
}
// ForEachDivision visits each division in order from most-significant to least, the most significant with index 0,
// calling the given function for each, terminating early if the function returns true.
// ForEachDivision returns the number of visited divisions.
func (grouping *AddressDivisionGrouping) ForEachDivision(consumer func(divisionIndex int, division *AddressDivision) (stop bool)) int {
divArray := grouping.getDivArray()
if divArray != nil {
for i, div := range divArray {
if consumer(i, div) {
return i + 1
}
}
}
return len(divArray)
}
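// A usage sketch (illustrative):
//
//	visited := grouping.ForEachDivision(func(i int, div *AddressDivision) bool {
//		fmt.Println(i, div)
//		return false // keep going; return true to stop early
//	})
//	_ = visited // the number of divisions visited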
// String implements the fmt.Stringer interface,
// returning the normalized string provided by ToNormalizedString if this grouping originated as an address section,
// or printed as a slice with each division converted to a string by String (i.e. "[div0 div1 ...]"),
// or "<nil>" if the receiver is a nil pointer
func (grouping *AddressDivisionGrouping) String() string {
if grouping == nil {
return nilString()
}
return grouping.toString()
} |
func (grouping *addressDivisionGroupingInternal) getPrefixLen() PrefixLen { |
Suites.js | "use strict";
var __spreadArray = (this && this.__spreadArray) || function (to, from, pack) {
if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {
if (ar || !(i in from)) {
if (!ar) ar = Array.prototype.slice.call(from, 0, i);
ar[i] = from[i];
}
}
return to.concat(ar || Array.prototype.slice.call(from));
};
var __importDefault = (this && this.__importDefault) || function (mod) {
return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
var expo_constants_1 = __importDefault(require("expo-constants"));
var react_1 = __importDefault(require("react"));
var react_native_1 = require("react-native");
var DoneText_1 = __importDefault(require("./DoneText"));
var SuiteResult_1 = __importDefault(require("./SuiteResult"));
function Suites(_a) {
var suites = _a.suites, done = _a.done, numFailed = _a.numFailed, results = _a.results;
var ref = react_1.default.useRef(null);
var renderItem = function (_a) {
var item = _a.item;
return react_1.default.createElement(SuiteResult_1.default, { r: item, depth: 0 });
};
var keyExtractor = function (item) { return item.get('result').get('id'); };
var scrollToEnd = react_1.default.useMemo(function () { return function () {
// @ts-ignore
if (ref.current)
ref.current.scrollToEnd({ animated: false });
}; }, [ref]);
react_1.default.useEffect(function () {
if (done && ref.current) {
scrollToEnd();
}
}, [ref, done]);
var ListFooterComponent = function () { return (react_1.default.createElement(DoneText_1.default, { done: done, numFailed: numFailed, results: results })); };
return (react_1.default.createElement(react_native_1.FlatList, { ref: ref, style: styles.list, contentContainerStyle: styles.contentContainerStyle, data: __spreadArray([], suites, true), keyExtractor: keyExtractor, renderItem: renderItem, ListFooterComponent: ListFooterComponent, onContentSizeChange: scrollToEnd, onLayout: scrollToEnd }));
}
exports.default = Suites;
var styles = react_native_1.StyleSheet.create({
contentContainerStyle: {
padding: 5,
paddingBottom: (expo_constants_1.default.statusBarHeight || 24) + 128,
},
list: {
flex: 1,
},
});
//# sourceMappingURL=Suites.js.map
unit.rs | use std::ops::*;
use num::traits::Pow;
use matheval::{Unit, BaseUnit, Number};
#[test]
fn base_unit_symbols() {
assert_eq!("s", BaseUnit::Second.symbol());
assert_eq!("m", BaseUnit::Meter.symbol());
assert_eq!("g", BaseUnit::Gram.symbol());
assert_eq!("K", BaseUnit::Kelvin.symbol());
assert_eq!("mol", BaseUnit::Mole.symbol());
assert_eq!("cd", BaseUnit::Candela.symbol());
assert_eq!("A", BaseUnit::Ampere.symbol());
}
#[test]
fn base_unit_try_from() {
assert!(BaseUnit::try_from(0).is_ok());
assert!(BaseUnit::try_from(1).is_ok());
assert!(BaseUnit::try_from(2).is_ok());
assert!(BaseUnit::try_from(3).is_ok());
assert!(BaseUnit::try_from(4).is_ok());
assert!(BaseUnit::try_from(5).is_ok());
assert!(BaseUnit::try_from(6).is_ok());
}
#[test]
fn is_empty() {
assert!(Unit::empty().is_empty());
assert!(!Unit::base(BaseUnit::Second).is_empty());
assert!(!Unit::base(BaseUnit::Meter).is_empty());
assert!(!Unit::base(BaseUnit::Gram).is_empty());
assert!(!Unit::base(BaseUnit::Candela).is_empty());
}
#[test]
fn equal() {
assert_eq!(Unit::empty(), Unit::empty());
assert_eq!(Unit::base(BaseUnit::Second), Unit::base(BaseUnit::Second));
assert_eq!(Unit::base(BaseUnit::Meter), Unit::base(BaseUnit::Meter));
assert_eq!(Unit::base(BaseUnit::Gram), Unit::base(BaseUnit::Gram));
assert_eq!(Unit::base(BaseUnit::Kelvin), Unit::base(BaseUnit::Kelvin));
assert_eq!(Unit::base(BaseUnit::Mole), Unit::base(BaseUnit::Mole));
assert_eq!(Unit::base(BaseUnit::Candela), Unit::base(BaseUnit::Candela));
assert_eq!(Unit::base(BaseUnit::Ampere), Unit::base(BaseUnit::Ampere));
assert_eq!(
Unit::base(BaseUnit::Ampere).pow(Number::from_i64(10)),
Unit::base(BaseUnit::Ampere).pow(Number::from_i64(10))
);
}
#[test]
fn not_equal() {
assert_ne!(Unit::empty(), Unit::base(BaseUnit::Second));
assert_ne!(Unit::empty(), Unit::base(BaseUnit::Meter));
assert_ne!(Unit::empty(), Unit::base(BaseUnit::Gram));
assert_ne!(Unit::empty(), Unit::base(BaseUnit::Kelvin));
assert_ne!(Unit::empty(), Unit::base(BaseUnit::Mole));
assert_ne!(Unit::empty(), Unit::base(BaseUnit::Candela));
assert_ne!(Unit::empty(), Unit::base(BaseUnit::Ampere));
assert_ne!(Unit::base(BaseUnit::Meter), Unit::base(BaseUnit::Second));
assert_eq!(Unit::base(BaseUnit::Ampere).pow(Number::from_i64(1)), Unit::base(BaseUnit::Ampere));
}
#[test]
fn simple_mul() {
assert_eq!(
Unit::base(BaseUnit::Candela).pow(Number::from_i64(2)),
Unit::base(BaseUnit::Candela).mul(Unit::base(BaseUnit::Candela))
);
assert_eq!(Unit::empty(), Unit::base(BaseUnit::Second).mul(Unit::base(BaseUnit::Second).pow(Number::neg_one())));
assert_eq!(
Unit::base(BaseUnit::Candela).mul(Unit::base(BaseUnit::Second)),
Unit::base(BaseUnit::Second).mul(Unit::base(BaseUnit::Candela))
);
}
#[test]
fn simple_div() {
assert_eq!(Unit::empty(), Unit::base(BaseUnit::Candela).div(Unit::base(BaseUnit::Candela)));
assert_eq!(
Unit::base(BaseUnit::Second).pow(Number::from_i64(2)),
Unit::base(BaseUnit::Second).div(Unit::base(BaseUnit::Second).pow(Number::neg_one()))
);
assert_eq!(
Unit::base(BaseUnit::Second).pow(Number::from_i64(-2)),
Unit::base(BaseUnit::Second).pow(Number::neg_one()).div(Unit::base(BaseUnit::Second))
);
assert_eq!(
Unit::base(BaseUnit::Candela).pow(Number::neg_one()).div(Unit::base(BaseUnit::Second).pow(Number::neg_one())),
Unit::base(BaseUnit::Second).div(Unit::base(BaseUnit::Candela))
);
}
#[test]
fn simple_pow() {
assert_eq!(
Unit::base(BaseUnit::Second).pow(Number::from_i64(2)),
Unit::base(BaseUnit::Second).mul(Unit::base(BaseUnit::Second))
);
assert_eq!(
Unit::base(BaseUnit::Second).pow(Number::from_i64(5)),
Unit::base(BaseUnit::Second)
.mul(Unit::base(BaseUnit::Second)).mul(Unit::base(BaseUnit::Second))
.mul(Unit::base(BaseUnit::Second)).mul(Unit::base(BaseUnit::Second))
);
assert_eq!(
Unit::base(BaseUnit::Second).pow(Number::from_i64(-5)),
Unit::empty().div(Unit::base(BaseUnit::Second))
.div(Unit::base(BaseUnit::Second)).div(Unit::base(BaseUnit::Second))
.div(Unit::base(BaseUnit::Second)).div(Unit::base(BaseUnit::Second))
);
assert_eq!(Unit::empty(), Unit::base(BaseUnit::Second).pow(Number::zero()));
}
#[test]
fn to_string_empty() {
assert_eq!("", Unit::empty().to_string());
}
#[test]
fn to_string_base() {
assert_eq!("s", Unit::base(BaseUnit::Second).to_string());
assert_eq!("m", Unit::base(BaseUnit::Meter).to_string());
assert_eq!("g", Unit::base(BaseUnit::Gram).to_string());
assert_eq!("K", Unit::base(BaseUnit::Kelvin).to_string());
assert_eq!("mol", Unit::base(BaseUnit::Mole).to_string());
assert_eq!("cd", Unit::base(BaseUnit::Candela).to_string());
assert_eq!("A", Unit::base(BaseUnit::Ampere).to_string());
}
#[test]
fn to_string_positive_int_power() {
assert_eq!("s^2", Unit::base(BaseUnit::Second).pow(Number::from_i64(2)).to_string());
assert_eq!("A^100", Unit::base(BaseUnit::Ampere).pow(Number::from_i64(100)).to_string());
assert_eq!("mol^10000", Unit::base(BaseUnit::Mole).pow(Number::from_i64(10000)).to_string());
}
#[test]
fn to_string_negative_int_power() {
assert_eq!("s^-2", Unit::base(BaseUnit::Second).pow(Number::from_i64(-2)).to_string());
assert_eq!("A^-100", Unit::base(BaseUnit::Ampere).pow(Number::from_i64(-100)).to_string());
assert_eq!("mol^-10000", Unit::base(BaseUnit::Mole).pow(Number::from_i64(-10000)).to_string());
}
#[test]
fn to_string_positive_fract_power() {
assert_eq!("s^(2/7)", Unit::base(BaseUnit::Second).pow(Number::from_i64s(2, 7)).to_string());
assert_eq!("A^(100/7)", Unit::base(BaseUnit::Ampere).pow(Number::from_i64s(100, 7)).to_string());
assert_eq!("mol^(10000/7)", Unit::base(BaseUnit::Mole).pow(Number::from_i64s(10000, 7)).to_string());
}
#[test]
fn to_string_negative_fract_power() {
assert_eq!("s^(-2/7)", Unit::base(BaseUnit::Second).pow(Number::from_i64s(-2, 7)).to_string());
assert_eq!("A^(-100/7)", Unit::base(BaseUnit::Ampere).pow(Number::from_i64s(-100, 7)).to_string());
assert_eq!("mol^(-10000/7)", Unit::base(BaseUnit::Mole).pow(Number::from_i64s(-10000, 7)).to_string());
}
#[test]
fn to_string_positive_float_power() {
assert_eq!("s^2.75", Unit::base(BaseUnit::Second).pow(Number::Float(2.75)).to_string());
assert_eq!("A^100.5", Unit::base(BaseUnit::Ampere).pow(Number::Float(100.5)).to_string());
assert_eq!("mol^10000.125", Unit::base(BaseUnit::Mole).pow(Number::Float(10000.125)).to_string());
}
#[test]
fn to_string_negative_float_power() {
assert_eq!("s^-2.75", Unit::base(BaseUnit::Second).pow(Number::Float(-2.75)).to_string());
assert_eq!("A^-100.5", Unit::base(BaseUnit::Ampere).pow(Number::Float(-100.5)).to_string());
assert_eq!("mol^-10000.125", Unit::base(BaseUnit::Mole).pow(Number::Float(-10000.125)).to_string());
}
#[test]
fn to_string_combined() {
assert_eq!("m s", Unit::base(BaseUnit::Second).mul(Unit::base(BaseUnit::Meter)).to_string());
assert_eq!("g m s", Unit::base(BaseUnit::Gram).mul(Unit::base(BaseUnit::Second)).mul(Unit::base(BaseUnit::Meter)).to_string());
assert_eq!("g^-1 m^(3/4) s^12 mol^-1",
Unit::base(BaseUnit::Gram).pow(Number::neg_one())
.mul(Unit::base(BaseUnit::Second).pow(Number::from_i64(12)))
.mul(Unit::base(BaseUnit::Meter).pow(Number::from_i64s(3, 4)))
.div(Unit::base(BaseUnit::Mole)).to_string()
);
}
entities.ts | import EntityType from '../types/entity';
import Entity from './entity';
const ENTITIES: Record<string, Omit<EntityType, 'name'>> = {
  [Entity.Nestle]: {
    reasons: [
      'Nestlé aggressively markets breast milk substitutes, particularly in underdeveloped countries.',
    ],
    sources: ['https://en.wikipedia.org/wiki/Nestl%C3%A9_boycott'],
},
};
export default ENTITIES;
contract.rs | // Copyright 2020 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
// (Re-)generated by schema tool
// >>>> DO NOT CHANGE THIS FILE! <<<<
// Change the json schema instead
#![allow(dead_code)]
use std::ptr;
use wasmlib::*;
use crate::consts::*;
use crate::params::*;
use crate::results::*;
pub struct DivideCall {
pub func: ScFunc,
}
pub struct InitCall {
pub func: ScInitFunc,
pub params: MutableInitParams,
}
pub struct MemberCall {
pub func: ScFunc,
pub params: MutableMemberParams,
}
pub struct SetOwnerCall {
pub func: ScFunc,
pub params: MutableSetOwnerParams,
}
pub struct GetFactorCall {
pub func: ScView,
pub params: MutableGetFactorParams,
pub results: ImmutableGetFactorResults,
}
pub struct GetOwnerCall {
pub func: ScView,
pub results: ImmutableGetOwnerResults,
}
pub struct ScFuncs {
}
impl ScFuncs {
pub fn divide(_ctx: & dyn ScFuncCallContext) -> DivideCall {
DivideCall {
func: ScFunc::new(HSC_NAME, HFUNC_DIVIDE),
}
}
pub fn init(_ctx: & dyn ScFuncCallContext) -> InitCall {
let mut f = InitCall {
func: ScInitFunc::new(HSC_NAME, HFUNC_INIT),
params: MutableInitParams { id: 0 },
};
f.func.set_ptrs(&mut f.params.id, ptr::null_mut());
f
}
pub fn member(_ctx: & dyn ScFuncCallContext) -> MemberCall {
let mut f = MemberCall {
func: ScFunc::new(HSC_NAME, HFUNC_MEMBER),
params: MutableMemberParams { id: 0 },
};
f.func.set_ptrs(&mut f.params.id, ptr::null_mut());
f
}
pub fn set_owner(_ctx: & dyn ScFuncCallContext) -> SetOwnerCall {
let mut f = SetOwnerCall {
func: ScFunc::new(HSC_NAME, HFUNC_SET_OWNER),
params: MutableSetOwnerParams { id: 0 },
};
f.func.set_ptrs(&mut f.params.id, ptr::null_mut());
f
}
pub fn get_factor(_ctx: & dyn ScViewCallContext) -> GetFactorCall {
let mut f = GetFactorCall {
func: ScView::new(HSC_NAME, HVIEW_GET_FACTOR),
params: MutableGetFactorParams { id: 0 },
results: ImmutableGetFactorResults { id: 0 },
};
f.func.set_ptrs(&mut f.params.id, &mut f.results.id);
f
}
pub fn get_owner(_ctx: & dyn ScViewCallContext) -> GetOwnerCall {
let mut f = GetOwnerCall {
func: ScView::new(HSC_NAME, HVIEW_GET_OWNER),
results: ImmutableGetOwnerResults { id: 0 },
};
f.func.set_ptrs(ptr::null_mut(), &mut f.results.id);
f
}
}
__init__.py | """
An enhanced distutils, providing support for Fortran compilers, for BLAS,
LAPACK and other common libraries for numerical computing, and more.
Public submodules are::
misc_util
system_info
cpu_info
log
exec_command
For details, please see the *Packaging* and *NumPy Distutils User Guide*
sections of the NumPy Reference Guide.
For configuring the preference for and location of libraries like BLAS and
LAPACK, and for setting include paths and similar build options, please see
``site.cfg.example`` in the root of the NumPy repository or sdist.
"""
import warnings
# Must import local ccompiler ASAP in order to get
# customized CCompiler.spawn effective.
from . import ccompiler
from . import unixccompiler
from .npy_pkg_config import *
warnings.warn("\n\n"
" `numpy.distutils` is deprecated since NumPy 1.23.0, as a result\n"
" of the deprecation of `distutils` itself. It will be removed for\n"
" Python >= 3.12. For older Python versions it will remain present.\n"
" It is recommended to use `setuptools < 60.0` for those Python versions.\n"
" For more details, see:\n"
" https://numpy.org/devdocs/reference/distutils_status_migration.html \n\n",
DeprecationWarning, stacklevel=2
)
del warnings
# If numpy is installed, add distutils.test()
try:
from . import __config__
# Normally numpy is installed if the above import works, but an interrupted
# in-place build could also have left a __config__.py. In that case the
# next import may still fail, so keep it inside the try block.
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
except ImportError:
pass
def customized_fcompiler(plat=None, compiler=None):
    from numpy.distutils.fcompiler import new_fcompiler
    c = new_fcompiler(plat=plat, compiler=compiler)
    c.customize()
    return c
def customized_ccompiler(plat=None, compiler=None, verbose=1):
c = ccompiler.new_compiler(plat=plat, compiler=compiler, verbose=verbose)
c.customize('')
return c
merge-binvox.go | // merge-binvox takes a filename prefix and reads in all files matching
// prefix*.binvox. It merges the binvox files together as VShells, then writes
// out prefix.vsh as a merged model.
package main
import (
"flag"
"fmt"
"log"
"os"
"path/filepath"
"github.com/gmlewis/stldice/v4/vshell"
)
var (
force = flag.Bool("f", false, "Force overwrite of output file")
prefix = flag.String("prefix", "out", "Prefix of files to merge and name of .vsh file to write (default='out')")
)
func main() {
flag.Usage = func() {
fmt.Fprintf(os.Stderr, "Usage of %s:\n", filepath.Base(os.Args[0]))
fmt.Fprintf(os.Stderr, "\t%v -prefix out\n\nOptions:\n", filepath.Base(os.Args[0]))
flag.PrintDefaults()
}
flag.Parse()
files, err := filepath.Glob(*prefix + "*.binvox")
	if err != nil {
		log.Fatalf("Glob: %v", err)
	}
	// filepath.Glob returns a nil error when nothing matches, so check for an empty result separately
	if len(files) == 0 {
		log.Fatalf("No files found matching %v*.binvox", *prefix)
	}
outFile := *prefix + ".vsh"
var newFiles []string
for _, f := range files { // Quick sanity check
if f != outFile {
newFiles = append(newFiles, f)
continue
}
if *force {
log.Printf("%v already exists. Overwriting due to -f flag.", outFile)
} else {
log.Fatalf("%v already exists. To overwrite, use -f flag.", outFile)
}
}
log.Printf("Merging %v binvox files into %v...", len(newFiles), outFile)
vsh, err := vshell.Merge(newFiles)
if err != nil {
log.Fatalf("Merge: %v", err)
}
if err := vsh.Write(outFile, 0, 0, 0, 0, 0, 0); err != nil {
log.Fatalf("Save: %v", err)
}
log.Printf("Done.")
}
argument.rs | use super::*;
/// FFI: Use **Argument\<java::lang::Object\>** instead of jobject. This represents a (null?) function argument.
///
/// Unlike most Java reference types from this library, this *can* be null.
///
/// FFI safe where a jobject is safe, assuming you match your types correctly. Using the wrong type may result in
/// soundness issues, but at least on Android mostly seems to just result in JNI aborting execution for the current
/// process when calling methods on an instance of the wrong type.
#[repr(transparent)]
pub struct Argument<Class: AsValidJObjectAndEnv> {
object: jobject,
_class: PhantomData<Class>,
}
impl<Class: AsValidJObjectAndEnv> Argument<Class> {
/// **unsafe**: There's no guarantee the jobject being passed is valid or null, nor any means of checking it.
pub unsafe fn from_unchecked(object: jobject) -> Self { Self { object, _class: PhantomData } }
/// **unsafe**: This assumes the argument belongs to the given Env/VM, which is technically unsound. However, the
/// intended use case of immediately converting any Argument s into ArgumentRef s at the start of a JNI callback,
/// where Java directly invoked your function with an Env + arguments, is sound.
pub unsafe fn with_unchecked<'env>(&'env self, env: &'env Env) -> Option<ArgumentRef<'env, Class>> {
if self.object.is_null() {
None
} else {
let env = env.as_jni_env();
Some(ArgumentRef {
oae: ObjectAndEnv {
object: self.object,
env,
},
_env: PhantomData,
_class: PhantomData,
})
}
}
/// **unsafe**: This assumes the argument belongs to the given Env/VM, which is technically unsound. However, the
/// intended use case of immediately converting any Argument s into ArgumentRef s at the start of a JNI callback,
/// where Java directly invoked your function with an Env + arguments, is sound.
pub unsafe fn into_global(self, env: &Env) -> Option<Global<Class>> {
        if self.object.is_null() {
            None
        } else {
let jnienv = env.as_jni_env();
let gen_vm = env.get_gen_vm();
let global = (**jnienv).NewGlobalRef.unwrap()(jnienv, self.object);
Some(Global {
global,
gen_vm,
pd: PhantomData,
})
}
}
}
/// A [Local](https://www.ibm.com/support/knowledgecenter/en/SSYKE2_8.0.0/com.ibm.java.vm.80.doc/docs/jni_refs.html),
/// non-null, reference to a Java object (+ &Env).
///
/// Much like Local, the inclusion of an Env means this cannot be stored statically or shared between threads.
///
/// **Not FFI Safe:** #\[repr(rust)\], and exact layout is likely to change - depending on exact features used - in the
/// future. Specifically, on Android, since we're guaranteed to only have a single ambient VM, we can likely store the
/// \*const JNIEnv in thread local storage instead of lugging it around in every Local. Of course, there's no
/// guarantee that's actually an *optimization*...
pub type ArgumentRef<'env, Class> = Ref<'env, Class>;
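// A sketch of the intended callback pattern described above (illustrative; the JNI
// export name and Java class are hypothetical, while Env, Argument and java::lang::Object
// come from this crate):
//
// #[no_mangle]
// pub extern "system" fn Java_com_example_Widget_poke(
//     env: &Env,
//     this: Argument<java::lang::Object>,
// ) {
//     // immediately convert the raw Argument into a safe ArgumentRef
//     if let Some(this) = unsafe { this.with_unchecked(env) } {
//         // call methods on `this` here
//     }
// }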
SigningLineEstate.ts | import { Paragraph, TextRun } from 'docx';
import { IndividualBeing } from '../../data/parties/base';
import {
estateNameKey,
genderKey,
nameKey,
} from '../../data/parties/grantorKey';
import { getDescriptorByGender } from '../../deed-form/parties/relationship/descriptors/descriptor-input-buttons';
export function SigningLineEstate(
rep: IndividualBeing,
descriptor: string,
estate: IndividualBeing,
) {
return [
new Paragraph({
children: [
new TextRun({}).break(),
      new TextRun({}).break(),
      new TextRun({}).break(),
      new TextRun({
        text:
          '___________________________ _____________________________________',
        size: 24,
font: 'Times New Roman',
}),
],
}),
new Paragraph({
indent: {
left: 5000,
},
children: [
new TextRun({
text: `${rep[nameKey]}, ${getDescriptorByGender(
descriptor,
rep[genderKey],
)} of ${estate[estateNameKey]}`,
size: 24,
font: 'Times New Roman',
}),
],
}),
];
}
namespaceCache.go | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination namespaceCache_mock.go -self_package go.temporal.io/server/common/cache
package cache
import (
"fmt"
"hash/fnv"
"sort"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/gogo/protobuf/proto"
namespacepb "go.temporal.io/api/namespace/v1"
"go.temporal.io/api/serviceerror"
"go.temporal.io/server/api/persistenceblobs/v1"
"go.temporal.io/server/common"
"go.temporal.io/server/common/clock"
"go.temporal.io/server/common/cluster"
"go.temporal.io/server/common/log"
"go.temporal.io/server/common/log/tag"
"go.temporal.io/server/common/metrics"
"go.temporal.io/server/common/persistence"
"go.temporal.io/server/common/primitives/timestamp"
)
// ReplicationPolicy is the namespace's replication policy,
// derived from namespace's replication config
type ReplicationPolicy int
const (
	// ReplicationPolicyOneCluster indicates that workflows do not need to be replicated;
	// applicable to local namespaces & global namespaces with one cluster
ReplicationPolicyOneCluster ReplicationPolicy = 0
	// ReplicationPolicyMultiCluster indicates that workflows need to be replicated
ReplicationPolicyMultiCluster ReplicationPolicy = 1
)
const (
namespaceCacheInitialSize = 10 * 1024
namespaceCacheMaxSize = 64 * 1024
namespaceCacheTTL = 0 // 0 means infinity
	// NamespaceCacheMinRefreshInterval is the minimum namespace cache refresh interval.
NamespaceCacheMinRefreshInterval = 2 * time.Second
// NamespaceCacheRefreshInterval namespace cache refresh interval
NamespaceCacheRefreshInterval = 10 * time.Second
// NamespaceCacheRefreshFailureRetryInterval is the wait time
// if refreshment encounters error
NamespaceCacheRefreshFailureRetryInterval = 1 * time.Second
namespaceCacheRefreshPageSize = 200
namespaceCacheInitialized int32 = 0
namespaceCacheStarted int32 = 1
namespaceCacheStopped int32 = 2
)
type (
	// PrepareCallbackFn is a function to be called before CallbackFn is called;
	// it is guaranteed that a PrepareCallbackFn and CallbackFn pair will either both be called or neither will be called
PrepareCallbackFn func()
	// CallbackFn is a function to be called when the namespace cache entries are changed;
	// it is guaranteed that a PrepareCallbackFn and CallbackFn pair will either both be called or neither will be called
CallbackFn func(prevNamespaces []*NamespaceCacheEntry, nextNamespaces []*NamespaceCacheEntry)
	// NamespaceCache is used to cache namespace information and configuration to avoid making too many calls to cassandra.
	// This cache is mainly used by frontend for resolving namespace names to namespace uuids which are used throughout the
	// system. Each namespace entry is kept in the cache for one hour but also has an expiry of 10 seconds. This results
	// in updating the namespace entry every 10 seconds, but in the case of a cassandra failure we can still keep on serving
	// requests using the stale entry from the cache for up to an hour.
NamespaceCache interface {
common.Daemon
RegisterNamespaceChangeCallback(shard int32, initialNotificationVersion int64, prepareCallback PrepareCallbackFn, callback CallbackFn)
UnregisterNamespaceChangeCallback(shard int32)
GetNamespace(name string) (*NamespaceCacheEntry, error)
GetNamespaceByID(id string) (*NamespaceCacheEntry, error)
GetNamespaceID(name string) (string, error)
GetNamespaceName(id string) (string, error)
GetAllNamespace() map[string]*NamespaceCacheEntry
GetCacheSize() (sizeOfCacheByName int64, sizeOfCacheByID int64)
}
namespaceCache struct {
status int32
shutdownChan chan struct{}
cacheNameToID *atomic.Value
cacheByID *atomic.Value
metadataMgr persistence.MetadataManager
clusterMetadata cluster.Metadata
timeSource clock.TimeSource
metricsClient metrics.Client
logger log.Logger
	// refreshLock is used to guarantee that at most one
	// goroutine is refreshing the namespaces at a time
refreshLock sync.Mutex
lastRefreshTime atomic.Value
checkLock sync.Mutex
lastCheckTime time.Time
callbackLock sync.Mutex
prepareCallbacks map[int32]PrepareCallbackFn
callbacks map[int32]CallbackFn
}
// NamespaceCacheEntries is NamespaceCacheEntry slice
NamespaceCacheEntries []*NamespaceCacheEntry
// NamespaceCacheEntry contains the info and config for a namespace
NamespaceCacheEntry struct {
clusterMetadata cluster.Metadata
sync.RWMutex
info *persistenceblobs.NamespaceInfo
config *persistenceblobs.NamespaceConfig
replicationConfig *persistenceblobs.NamespaceReplicationConfig
configVersion int64
failoverVersion int64
isGlobalNamespace bool
failoverNotificationVersion int64
notificationVersion int64
initialized bool
}
)
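// A minimal usage sketch (illustrative; the shard ID and notification version are made
// up, and the dependencies are assumed to be constructed elsewhere):
//
//	cache := NewNamespaceCache(metadataMgr, clusterMetadata, metricsClient, logger)
//	cache.Start()
//	defer cache.Stop()
//	cache.RegisterNamespaceChangeCallback(
//		1, // shard ID
//		0, // initial notification version
//		func() { /* prepare: runs while the cache lock is held */ },
//		func(prev, next []*NamespaceCacheEntry) { /* apply the ordered changes */ },
//	)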
// NewNamespaceCache creates a new instance of cache for holding onto namespace information to reduce the load on persistence
func NewNamespaceCache(
metadataMgr persistence.MetadataManager,
clusterMetadata cluster.Metadata,
metricsClient metrics.Client,
logger log.Logger,
) NamespaceCache {
cache := &namespaceCache{
status: namespaceCacheInitialized,
shutdownChan: make(chan struct{}),
cacheNameToID: &atomic.Value{},
cacheByID: &atomic.Value{},
metadataMgr: metadataMgr,
clusterMetadata: clusterMetadata,
timeSource: clock.NewRealTimeSource(),
metricsClient: metricsClient,
logger: logger,
prepareCallbacks: make(map[int32]PrepareCallbackFn),
callbacks: make(map[int32]CallbackFn),
}
cache.cacheNameToID.Store(newNamespaceCache())
cache.cacheByID.Store(newNamespaceCache())
cache.lastRefreshTime.Store(time.Time{})
return cache
}
func newNamespaceCache() Cache {
opts := &Options{}
opts.InitialCapacity = namespaceCacheInitialSize
opts.TTL = namespaceCacheTTL
return New(namespaceCacheMaxSize, opts)
}
func newNamespaceCacheEntry(
clusterMetadata cluster.Metadata,
) *NamespaceCacheEntry {
return &NamespaceCacheEntry{
clusterMetadata: clusterMetadata,
initialized: false,
}
}
// NewGlobalNamespaceCacheEntryForTest returns an entry with test data
func NewGlobalNamespaceCacheEntryForTest(
info *persistenceblobs.NamespaceInfo,
config *persistenceblobs.NamespaceConfig,
repConfig *persistenceblobs.NamespaceReplicationConfig,
failoverVersion int64,
clusterMetadata cluster.Metadata,
) *NamespaceCacheEntry {
return &NamespaceCacheEntry{
info: info,
config: config,
isGlobalNamespace: true,
replicationConfig: repConfig,
failoverVersion: failoverVersion,
clusterMetadata: clusterMetadata,
}
}
// NewLocalNamespaceCacheEntryForTest returns an entry with test data
func NewLocalNamespaceCacheEntryForTest(
info *persistenceblobs.NamespaceInfo,
config *persistenceblobs.NamespaceConfig,
targetCluster string,
clusterMetadata cluster.Metadata,
) *NamespaceCacheEntry {
return &NamespaceCacheEntry{
info: info,
config: config,
isGlobalNamespace: false,
replicationConfig: &persistenceblobs.NamespaceReplicationConfig{
ActiveClusterName: targetCluster,
Clusters: []string{targetCluster},
},
failoverVersion: common.EmptyVersion,
clusterMetadata: clusterMetadata,
}
}
// NewNamespaceCacheEntryForTest returns an entry with test data
func NewNamespaceCacheEntryForTest(
info *persistenceblobs.NamespaceInfo,
config *persistenceblobs.NamespaceConfig,
isGlobalNamespace bool,
repConfig *persistenceblobs.NamespaceReplicationConfig,
failoverVersion int64,
clusterMetadata cluster.Metadata,
) *NamespaceCacheEntry {
return &NamespaceCacheEntry{
info: info,
config: config,
isGlobalNamespace: isGlobalNamespace,
replicationConfig: repConfig,
failoverVersion: failoverVersion,
clusterMetadata: clusterMetadata,
}
}
func (c *namespaceCache) GetCacheSize() (sizeOfCacheByName int64, sizeOfCacheByID int64) {
return int64(c.cacheByID.Load().(Cache).Size()), int64(c.cacheNameToID.Load().(Cache).Size())
}
// Start starts the background refresh of namespaces
func (c *namespaceCache) Start() {
if !atomic.CompareAndSwapInt32(&c.status, namespaceCacheInitialized, namespaceCacheStarted) {
return
}
// initialize the cache by initial scan
err := c.refreshNamespaces()
if err != nil {
c.logger.Fatal("Unable to initialize namespace cache", tag.Error(err))
}
go c.refreshLoop()
}
// Stop stops the background refresh of namespaces
func (c *namespaceCache) Stop() {
if !atomic.CompareAndSwapInt32(&c.status, namespaceCacheStarted, namespaceCacheStopped) {
return
}
close(c.shutdownChan)
}
func (c *namespaceCache) GetAllNamespace() map[string]*NamespaceCacheEntry {
result := make(map[string]*NamespaceCacheEntry)
ite := c.cacheByID.Load().(Cache).Iterator()
defer ite.Close()
for ite.HasNext() {
entry := ite.Next()
id := entry.Key().(string)
namespaceCacheEntry := entry.Value().(*NamespaceCacheEntry)
namespaceCacheEntry.RLock()
dup := namespaceCacheEntry.duplicate()
namespaceCacheEntry.RUnlock()
result[id] = dup
}
return result
}
// RegisterNamespaceChangeCallback sets a namespace change callback.
// WARN: the prepareCallback function will be triggered by the namespace cache while holding the namespace cache lock;
// make sure the callback function does not call the namespace cache again, to avoid deadlock.
// The callback function will be invoked when NOT holding the namespace cache lock.
func (c *namespaceCache) RegisterNamespaceChangeCallback(
shard int32,
initialNotificationVersion int64,
prepareCallback PrepareCallbackFn,
callback CallbackFn,
) {
c.callbackLock.Lock()
c.prepareCallbacks[shard] = prepareCallback
c.callbacks[shard] = callback
c.callbackLock.Unlock()
// this section is trying to make the shard catch up with namespace changes
namespaces := NamespaceCacheEntries{}
for _, namespace := range c.GetAllNamespace() {
namespaces = append(namespaces, namespace)
}
	// we must notify the changes in an ordered fashion,
	// since history shards have to update the shard info
	// with the namespace change version.
sort.Sort(namespaces)
var prevEntries []*NamespaceCacheEntry
var nextEntries []*NamespaceCacheEntry
for _, namespace := range namespaces {
if namespace.notificationVersion >= initialNotificationVersion {
prevEntries = append(prevEntries, nil)
nextEntries = append(nextEntries, namespace)
}
}
if len(prevEntries) > 0 {
prepareCallback()
callback(prevEntries, nextEntries)
}
}
// UnregisterNamespaceChangeCallback deletes a namespace failover callback
func (c *namespaceCache) UnregisterNamespaceChangeCallback(
shard int32,
) {
c.callbackLock.Lock()
defer c.callbackLock.Unlock()
delete(c.prepareCallbacks, shard)
delete(c.callbacks, shard)
}
// GetNamespace retrieves the information from the cache if it exists, otherwise retrieves the information from metadata
// store and writes it to the cache with an expiry before returning back
func (c *namespaceCache) GetNamespace(
name string,
) (*NamespaceCacheEntry, error) {
if name == "" {
return nil, serviceerror.NewInvalidArgument("Namespace is empty.")
}
return c.getNamespace(name)
}
// GetNamespaceByID retrieves the information from the cache if it exists, otherwise retrieves the information from metadata
// store and writes it to the cache with an expiry before returning back
func (c *namespaceCache) GetNamespaceByID(
id string,
) (*NamespaceCacheEntry, error) {
if id == "" {
return nil, serviceerror.NewInvalidArgument("NamespaceID is empty.")
}
return c.getNamespaceByID(id, true)
}
// GetNamespaceID retrieves namespaceID by using GetNamespace
func (c *namespaceCache) GetNamespaceID(
name string,
) (string, error) {
entry, err := c.GetNamespace(name)
if err != nil {
return "", err
}
return entry.info.Id, nil
}
// GetNamespaceName returns namespace name given the namespace id
func (c *namespaceCache) GetNamespaceName(
id string,
) (string, error) {
entry, err := c.getNamespaceByID(id, false)
if err != nil {
return "", err
}
return entry.info.Name, nil
}
func (c *namespaceCache) refreshLoop() {
timer := time.NewTicker(NamespaceCacheRefreshInterval)
defer timer.Stop()
for {
select {
case <-c.shutdownChan:
return
case <-timer.C:
for err := c.refreshNamespaces(); err != nil; err = c.refreshNamespaces() {
c.logger.Error("Error refreshing namespace cache", tag.Error(err))
time.Sleep(NamespaceCacheRefreshFailureRetryInterval)
			select {
			case <-c.shutdownChan:
				// shutdownChan is closed on Stop; stop retrying
				return
			default:
				// not shutting down; retry the refresh without blocking here
			}
}
}
}
}
func (c *namespaceCache) refreshNamespaces() error {
c.refreshLock.Lock()
defer c.refreshLock.Unlock()
return c.refreshNamespacesLocked()
}
// this function only refreshes the namespaces in the v2 table;
// the namespaces in the v1 table will be refreshed if the cache is stale
func (c *namespaceCache) refreshNamespacesLocked() error {
now := c.timeSource.Now()
// first load the metadata record, then load namespaces
// this can guarantee that namespaces in the cache are not updated more than metadata record
metadata, err := c.metadataMgr.GetMetadata()
if err != nil {
return err
}
namespaceNotificationVersion := metadata.NotificationVersion
var token []byte
request := &persistence.ListNamespacesRequest{PageSize: namespaceCacheRefreshPageSize}
var namespaces NamespaceCacheEntries
continuePage := true
for continuePage {
request.NextPageToken = token
response, err := c.metadataMgr.ListNamespaces(request)
if err != nil {
return err
}
token = response.NextPageToken
for _, namespace := range response.Namespaces {
namespaces = append(namespaces, c.buildEntryFromRecord(namespace))
}
continuePage = len(token) != 0
}
	// we must apply the namespace changes in order,
	// since history shards have to update the shard info
	// with the namespace change version.
sort.Sort(namespaces)
var prevEntries []*NamespaceCacheEntry
var nextEntries []*NamespaceCacheEntry
// make a copy of the existing namespace cache, so we can calculate diff and do compare and swap
newCacheNameToID := newNamespaceCache()
newCacheByID := newNamespaceCache()
for _, namespace := range c.GetAllNamespace() {
newCacheNameToID.Put(namespace.info.Name, namespace.info.Id)
newCacheByID.Put(namespace.info.Id, namespace)
}
UpdateLoop:
for _, namespace := range namespaces {
if namespace.notificationVersion >= namespaceNotificationVersion {
			// this guarantees that namespace change events before the
			// namespaceNotificationVersion are loaded into the cache.
			// the namespace change events after the namespaceNotificationVersion
			// will be loaded into the cache in the next refresh
break UpdateLoop
}
prevEntry, nextEntry, err := c.updateIDToNamespaceCache(newCacheByID, namespace.info.Id, namespace)
if err != nil {
return err
}
c.updateNameToIDCache(newCacheNameToID, nextEntry.info.Name, nextEntry.info.Id)
if prevEntry != nil {
prevEntries = append(prevEntries, prevEntry)
nextEntries = append(nextEntries, nextEntry)
}
}
// NOTE: READ REF BEFORE MODIFICATION
// ref: historyEngine.go registerNamespaceFailoverCallback function
c.callbackLock.Lock()
defer c.callbackLock.Unlock()
c.triggerNamespaceChangePrepareCallbackLocked()
c.cacheByID.Store(newCacheByID)
c.cacheNameToID.Store(newCacheNameToID)
c.triggerNamespaceChangeCallbackLocked(prevEntries, nextEntries)
// only update last refresh time when refresh succeeded
c.lastRefreshTime.Store(now)
return nil
}
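// A minimal sketch (illustrative names only, not part of this file) of the
// copy-then-swap pattern refreshNamespacesLocked relies on: readers Load()
// an immutable snapshot while the refresher builds a replacement map and
// Store()s it in one atomic step, so a reader sees either the old or the
// new snapshot, never a mix.
//
//	var snapshot atomic.Value // holds map[string]string
//	snapshot.Store(map[string]string{"ns1": "id1"})
//
//	old := snapshot.Load().(map[string]string)
//	next := make(map[string]string, len(old)+1)
//	for k, v := range old {
//		next[k] = v // copy the existing entries
//	}
//	next["ns2"] = "id2"  // apply the change
//	snapshot.Store(next) // publish atomically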
func (c *namespaceCache) checkAndContinue(
name string,
id string,
) (bool, error) {
now := c.timeSource.Now()
if now.Sub(c.lastRefreshTime.Load().(time.Time)) < NamespaceCacheMinRefreshInterval {
return false, nil
}
c.checkLock.Lock()
defer c.checkLock.Unlock()
now = c.timeSource.Now()
if now.Sub(c.lastCheckTime) < NamespaceCacheMinRefreshInterval {
return true, nil
}
c.lastCheckTime = now
_, err := c.metadataMgr.GetNamespace(&persistence.GetNamespaceRequest{Name: name, ID: id})
if err != nil {
return false, err
}
return true, nil
}
func (c *namespaceCache) updateNameToIDCache(
cacheNameToID Cache,
name string,
id string,
) {
cacheNameToID.Put(name, id)
}
func (c *namespaceCache) updateIDToNamespaceCache(
cacheByID Cache,
id string,
record *NamespaceCacheEntry,
) (*NamespaceCacheEntry, *NamespaceCacheEntry, error) {
elem, err := cacheByID.PutIfNotExist(id, newNamespaceCacheEntry(c.clusterMetadata))
if err != nil {
return nil, nil, err
}
entry := elem.(*NamespaceCacheEntry)
entry.Lock()
defer entry.Unlock()
var prevNamespace *NamespaceCacheEntry
triggerCallback := c.clusterMetadata.IsGlobalNamespaceEnabled() &&
// initialized will be true when the entry contains valid data
entry.initialized &&
record.notificationVersion > entry.notificationVersion
if triggerCallback {
prevNamespace = entry.duplicate()
}
entry.info = record.info
entry.config = record.config
entry.replicationConfig = record.replicationConfig
entry.configVersion = record.configVersion
entry.failoverVersion = record.failoverVersion
entry.isGlobalNamespace = record.isGlobalNamespace
entry.failoverNotificationVersion = record.failoverNotificationVersion
entry.notificationVersion = record.notificationVersion
entry.initialized = record.initialized
nextNamespace := entry.duplicate()
return prevNamespace, nextNamespace, nil
}
// getNamespace retrieves the information from the cache if it exists; otherwise it retrieves
// the information from the metadata store and writes it to the cache with an expiry before returning it
func (c *namespaceCache) getNamespace(
name string,
) (*NamespaceCacheEntry, error) {
id, cacheHit := c.cacheNameToID.Load().(Cache).Get(name).(string)
if cacheHit {
return c.getNamespaceByID(id, true)
}
doContinue, err := c.checkAndContinue(name, "")
if err != nil {
return nil, err
}
if !doContinue {
return nil, serviceerror.NewNotFound(fmt.Sprintf("namespace: %v not found", name))
}
c.refreshLock.Lock()
defer c.refreshLock.Unlock()
id, cacheHit = c.cacheNameToID.Load().(Cache).Get(name).(string)
if cacheHit {
return c.getNamespaceByID(id, true)
}
if err := c.refreshNamespacesLocked(); err != nil {
return nil, err
}
id, cacheHit = c.cacheNameToID.Load().(Cache).Get(name).(string) | // impossible case
return nil, serviceerror.NewNotFound(fmt.Sprintf("namespace: %v not found", name))
}
// getNamespaceByID retrieves the information from the cache if it exists; otherwise it retrieves
// the information from the metadata store and writes it to the cache with an expiry before returning it
func (c *namespaceCache) getNamespaceByID(
id string,
deepCopy bool,
) (*NamespaceCacheEntry, error) {
var result *NamespaceCacheEntry
entry, cacheHit := c.cacheByID.Load().(Cache).Get(id).(*NamespaceCacheEntry)
if cacheHit {
entry.RLock()
result = entry
if deepCopy {
result = entry.duplicate()
}
entry.RUnlock()
return result, nil
}
doContinue, err := c.checkAndContinue("", id)
if err != nil {
return nil, err
}
if !doContinue {
return nil, serviceerror.NewNotFound(fmt.Sprintf("namespace ID: %v not found", id))
}
c.refreshLock.Lock()
defer c.refreshLock.Unlock()
entry, cacheHit = c.cacheByID.Load().(Cache).Get(id).(*NamespaceCacheEntry)
if cacheHit {
entry.RLock()
result = entry
if deepCopy {
result = entry.duplicate()
}
entry.RUnlock()
return result, nil
}
if err := c.refreshNamespacesLocked(); err != nil {
return nil, err
}
entry, cacheHit = c.cacheByID.Load().(Cache).Get(id).(*NamespaceCacheEntry)
if cacheHit {
entry.RLock()
result = entry
if deepCopy {
result = entry.duplicate()
}
entry.RUnlock()
return result, nil
}
return nil, serviceerror.NewNotFound(fmt.Sprintf("namespace ID: %v not found", id))
}
func (c *namespaceCache) triggerNamespaceChangePrepareCallbackLocked() {
sw := c.metricsClient.StartTimer(metrics.NamespaceCacheScope, metrics.NamespaceCachePrepareCallbacksLatency)
defer sw.Stop()
for _, prepareCallback := range c.prepareCallbacks {
prepareCallback()
}
}
func (c *namespaceCache) triggerNamespaceChangeCallbackLocked(
prevNamespaces []*NamespaceCacheEntry,
nextNamespaces []*NamespaceCacheEntry,
) {
sw := c.metricsClient.StartTimer(metrics.NamespaceCacheScope, metrics.NamespaceCacheCallbacksLatency)
defer sw.Stop()
for _, callback := range c.callbacks {
callback(prevNamespaces, nextNamespaces)
}
}
func (c *namespaceCache) buildEntryFromRecord(
record *persistence.GetNamespaceResponse,
) *NamespaceCacheEntry {
// this is a shallow copy, but since the record is generated by persistence
// and is only accessible here, that is fine
newEntry := newNamespaceCacheEntry(c.clusterMetadata)
newEntry.info = record.Namespace.Info
newEntry.config = record.Namespace.Config
newEntry.replicationConfig = record.Namespace.ReplicationConfig
newEntry.configVersion = record.Namespace.ConfigVersion
newEntry.failoverVersion = record.Namespace.FailoverVersion
newEntry.isGlobalNamespace = record.IsGlobalNamespace
newEntry.failoverNotificationVersion = record.Namespace.FailoverNotificationVersion
newEntry.notificationVersion = record.NotificationVersion
newEntry.initialized = true
return newEntry
}
func (entry *NamespaceCacheEntry) duplicate() *NamespaceCacheEntry {
// this is a deep copy
result := newNamespaceCacheEntry(entry.clusterMetadata)
result.info = proto.Clone(entry.info).(*persistenceblobs.NamespaceInfo)
if result.info.Data == nil {
result.info.Data = make(map[string]string, 0)
}
result.config = proto.Clone(entry.config).(*persistenceblobs.NamespaceConfig)
if result.config.BadBinaries == nil {
// guard against a nil BadBinaries before touching its Binaries field
result.config.BadBinaries = &namespacepb.BadBinaries{}
}
if result.config.BadBinaries.Binaries == nil {
result.config.BadBinaries.Binaries = make(map[string]*namespacepb.BadBinaryInfo)
}
result.replicationConfig = proto.Clone(entry.replicationConfig).(*persistenceblobs.NamespaceReplicationConfig)
result.configVersion = entry.configVersion
result.failoverVersion = entry.failoverVersion
result.isGlobalNamespace = entry.isGlobalNamespace
result.failoverNotificationVersion = entry.failoverNotificationVersion
result.notificationVersion = entry.notificationVersion
result.initialized = entry.initialized
return result
}
// GetInfo returns the namespace info
func (entry *NamespaceCacheEntry) GetInfo() *persistenceblobs.NamespaceInfo {
return entry.info
}
// GetConfig returns the namespace config
func (entry *NamespaceCacheEntry) GetConfig() *persistenceblobs.NamespaceConfig {
return entry.config
}
// GetReplicationConfig returns the namespace replication config
func (entry *NamespaceCacheEntry) GetReplicationConfig() *persistenceblobs.NamespaceReplicationConfig {
return entry.replicationConfig
}
// GetConfigVersion returns the namespace config version
func (entry *NamespaceCacheEntry) GetConfigVersion() int64 {
return entry.configVersion
}
// GetFailoverVersion returns the namespace failover version
func (entry *NamespaceCacheEntry) GetFailoverVersion() int64 {
return entry.failoverVersion
}
// IsGlobalNamespace returns whether the namespace is a global namespace
func (entry *NamespaceCacheEntry) IsGlobalNamespace() bool {
return entry.isGlobalNamespace
}
// GetFailoverNotificationVersion returns the global notification version of when the failover happened
func (entry *NamespaceCacheEntry) GetFailoverNotificationVersion() int64 {
return entry.failoverNotificationVersion
}
// GetNotificationVersion returns the global notification version of when the namespace changed
func (entry *NamespaceCacheEntry) GetNotificationVersion() int64 {
return entry.notificationVersion
}
// IsNamespaceActive returns whether the namespace is active, i.e. a non-global namespace, or a global namespace whose active cluster is the current cluster
func (entry *NamespaceCacheEntry) IsNamespaceActive() bool {
if !entry.isGlobalNamespace {
// namespace is not a global namespace, meaning namespace is always "active" within each cluster
return true
}
return entry.clusterMetadata.GetCurrentClusterName() == entry.replicationConfig.ActiveClusterName
}
// GetReplicationPolicy returns the derived workflow replication policy
func (entry *NamespaceCacheEntry) GetReplicationPolicy() ReplicationPolicy {
// the frontend guarantees that the cluster list always contains the active cluster, so if the
// # of clusters is 1 we do not need to send out any replication events
if entry.isGlobalNamespace && len(entry.replicationConfig.Clusters) > 1 {
return ReplicationPolicyMultiCluster
}
return ReplicationPolicyOneCluster
}
// GetNamespaceNotActiveErr returns an error if the namespace is not active, nil otherwise
func (entry *NamespaceCacheEntry) GetNamespaceNotActiveErr() error {
if entry.IsNamespaceActive() {
// namespace is considered active
return nil
}
return serviceerror.NewNamespaceNotActive(
entry.info.Name,
entry.clusterMetadata.GetCurrentClusterName(),
entry.replicationConfig.ActiveClusterName,
)
}
// Len implements sort.Interface.
func (t NamespaceCacheEntries) Len() int {
return len(t)
}
// Swap implements sort.Interface.
func (t NamespaceCacheEntries) Swap(i, j int) {
t[i], t[j] = t[j], t[i]
}
// Less implements sort.Interface
func (t NamespaceCacheEntries) Less(i, j int) bool {
return t[i].notificationVersion < t[j].notificationVersion
}
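// Usage sketch (e1 and e2 are hypothetical entries, with e1 carrying the
// lower notification version): sorting guarantees changes are applied in
// version order, as refreshNamespacesLocked requires.
//
//	entries := NamespaceCacheEntries{e2, e1}
//	sort.Sort(entries) // entries is now {e1, e2}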
// CreateNamespaceCacheEntry creates a cache entry from a namespace name
func CreateNamespaceCacheEntry(
namespace string,
) *NamespaceCacheEntry {
return &NamespaceCacheEntry{info: &persistenceblobs.NamespaceInfo{Name: namespace}}
}
// SampleRetentionKey is the key to specify sample retention
var SampleRetentionKey = "sample_retention_days"
// SampleRateKey is the key to specify the sample rate
var SampleRateKey = "sample_retention_rate"
// GetRetentionDays returns the retention in days for the given workflow
func (entry *NamespaceCacheEntry) GetRetentionDays(
workflowID string,
) int32 {
if entry.IsSampledForLongerRetention(workflowID) {
if sampledRetentionValue, ok := entry.info.Data[SampleRetentionKey]; ok {
sampledRetentionDays, err := strconv.Atoi(sampledRetentionValue)
if err != nil || sampledRetentionDays < timestamp.DaysFromDuration(entry.config.Retention) {
return timestamp.DaysInt32FromDuration(entry.config.Retention)
}
return int32(sampledRetentionDays)
}
}
if entry.config.Retention == nil {
return 0
}
return timestamp.DaysInt32FromDuration(entry.config.Retention)
}
// IsSampledForLongerRetentionEnabled returns whether sampling for longer retention is enabled
func (entry *NamespaceCacheEntry) IsSampledForLongerRetentionEnabled(
workflowID string,
) bool {
_, ok := entry.info.Data[SampleRateKey]
return ok
}
// IsSampledForLongerRetention returns whether the given workflow should be sampled for longer retention
func (entry *NamespaceCacheEntry) IsSampledForLongerRetention(
workflowID string,
) bool {
if sampledRateValue, ok := entry.info.Data[SampleRateKey]; ok {
sampledRate, err := strconv.ParseFloat(sampledRateValue, 64)
if err != nil {
return false
}
h := fnv.New32a()
_, err = h.Write([]byte(workflowID))
if err != nil {
return false
}
hash := h.Sum32()
r := float64(hash%1000) / float64(1000) // use 1000 so we support one decimal rate like 1.5%.
if r < sampledRate { // sampled
return true
}
}
return false
} | if cacheHit {
return c.getNamespaceByID(id, true)
} |
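// Worked sketch of the sampling decision in IsSampledForLongerRetention:
// workflow IDs are hashed into 1000 buckets so one-decimal percentage rates
// are representable. With sample_retention_rate = "0.015" (i.e. 1.5%), a
// workflow is sampled iff fnv32a(workflowID)%1000 < 15:
//
//	h := fnv.New32a()
//	h.Write([]byte("my-workflow-id")) // hypothetical workflow ID
//	r := float64(h.Sum32()%1000) / 1000
//	sampled := r < 0.015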
main.go | package main
import (
"bufio"
"fmt"
"os"
)
var in *bufio.Scanner
func init() |
func nextInt() int {
in.Scan()
x := 0
for _, b := range in.Bytes() {
x *= 10
x += int(b - '0')
}
return x
}
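// Sketch: with stdin "3 4\n", successive nextInt() calls return 3 then 4;
// bufio.ScanWords splits on any whitespace, and the manual base-10 fold
// avoids strconv allocations (assumes non-negative integer tokens only).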
func main() {
N := nextInt()
M := nextInt()
D := make([][]int, N+1)
for i := 1; i <= N; i++ {
D[i] = make([]int, M+1)
for j := 1; j <= M; j++ {
D[i][j] = -1
}
}
D[1][1] = 0
var cut func(int, int) int
cut = func(n, m int) int {
if D[n][m] != -1 {
return D[n][m]
}
x := 1
if n > m {
x += cut(n/2, m)
x += cut(n-n/2, m)
} else {
x += cut(n, m/2)
x += cut(n, m-m/2)
}
D[n][m] = x
return x
}
answer := cut(N, M)
fmt.Println(answer)
}
| {
in = bufio.NewScanner(os.Stdin)
in.Split(bufio.ScanWords)
} |
texture.rs | use super::{
image_texture_conversion::image_to_texture, Extent3d, SamplerDescriptor, TextureDescriptor,
TextureDimension, TextureFormat,
};
use crate::renderer::{
RenderResource, RenderResourceContext, RenderResourceId, RenderResourceType,
};
use bevy_app::prelude::EventReader;
use bevy_asset::{AssetEvent, Assets, Handle};
use bevy_ecs::system::Res;
use bevy_reflect::TypeUuid;
use bevy_utils::HashSet;
use thiserror::Error;
pub const TEXTURE_ASSET_INDEX: u64 = 0;
pub const SAMPLER_ASSET_INDEX: u64 = 1;
#[derive(Debug, Clone, TypeUuid)]
#[uuid = "6ea26da6-6cf8-4ea2-9986-1d7bf6c17d6f"]
pub struct Texture {
pub data: Vec<u8>,
pub size: Extent3d,
pub format: TextureFormat,
pub dimension: TextureDimension,
pub sampler: SamplerDescriptor,
}
impl Default for Texture {
fn default() -> Self {
Texture {
data: Default::default(),
size: Extent3d {
width: 1,
height: 1,
depth: 1,
},
format: TextureFormat::Rgba8UnormSrgb,
dimension: TextureDimension::D2,
sampler: Default::default(),
}
}
}
impl Texture {
pub fn new(
size: Extent3d,
dimension: TextureDimension,
data: Vec<u8>,
format: TextureFormat,
) -> Self {
debug_assert_eq!(
size.volume() * format.pixel_size(),
data.len(),
"Pixel data, size and format have to match",
);
Self {
data,
size,
dimension,
format,
..Default::default()
}
}
pub fn new_fill(
size: Extent3d,
dimension: TextureDimension,
pixel: &[u8],
format: TextureFormat,
) -> Self {
let mut value = Texture {
format,
dimension,
..Default::default()
};
value.resize(size);
debug_assert_eq!(
pixel.len() % format.pixel_size(),
0,
"Must not have incomplete pixel data."
);
debug_assert!(
pixel.len() <= value.data.len(),
"Fill data must fit within pixel buffer."
);
for current_pixel in value.data.chunks_exact_mut(pixel.len()) {
current_pixel.copy_from_slice(pixel);
}
value
}
pub fn aspect_2d(&self) -> f32 {
self.size.height as f32 / self.size.width as f32
}
pub fn resize(&mut self, size: Extent3d) {
self.size = size;
self.data
.resize(size.volume() * self.format.pixel_size(), 0);
}
/// Changes the `size`, asserting that the total number of data elements (pixels) remains the same.
pub fn reinterpret_size(&mut self, new_size: Extent3d) {
assert!(
new_size.volume() == self.size.volume(),
"Incompatible sizes: old = {:?} new = {:?}",
self.size,
new_size
);
self.size = new_size;
}
/// Takes a 2D texture containing vertically stacked images of the same size, and reinterprets it as a 2D array texture,
/// where each of the stacked images becomes one layer of the array. This is primarily for use with the `texture2DArray`
/// shader uniform type.
pub fn reinterpret_stacked_2d_as_array(&mut self, layers: u32) {
// Must be a stacked image, and the height must be divisible by layers.
assert!(self.dimension == TextureDimension::D2);
assert!(self.size.depth == 1);
assert_eq!(self.size.height % layers, 0);
self.reinterpret_size(Extent3d {
width: self.size.width,
height: self.size.height / layers,
depth: layers,
});
}
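// Example: a 256x1024 sheet of four vertically stacked 256x256 images,
// reinterpreted via `reinterpret_stacked_2d_as_array(4)`, becomes a
// 256x256 texture with depth 4 (one layer per image); the total pixel
// count is unchanged, which is what `reinterpret_size` asserts.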
/// Convert a texture from a format to another
/// Only a few formats are supported as input and output:
/// - `TextureFormat::R8Unorm`
/// - `TextureFormat::Rg8Unorm`
/// - `TextureFormat::Rgba8UnormSrgb`
/// - `TextureFormat::Bgra8UnormSrgb`
pub fn convert(&self, new_format: TextureFormat) -> Option<Self> {
super::image_texture_conversion::texture_to_image(self)
.and_then(|img| match new_format {
TextureFormat::R8Unorm => Some(image::DynamicImage::ImageLuma8(img.into_luma8())),
TextureFormat::Rg8Unorm => {
Some(image::DynamicImage::ImageLumaA8(img.into_luma_alpha8()))
}
TextureFormat::Rgba8UnormSrgb => {
Some(image::DynamicImage::ImageRgba8(img.into_rgba8()))
}
TextureFormat::Bgra8UnormSrgb => {
Some(image::DynamicImage::ImageBgra8(img.into_bgra8()))
}
_ => None,
})
.map(super::image_texture_conversion::image_to_texture)
}
pub fn texture_resource_system(
render_resource_context: Res<Box<dyn RenderResourceContext>>,
textures: Res<Assets<Texture>>,
mut texture_events: EventReader<AssetEvent<Texture>>,
) {
let render_resource_context = &**render_resource_context;
let mut changed_textures = HashSet::default();
for event in texture_events.iter() {
match event {
AssetEvent::Created { handle } => |
AssetEvent::Modified { handle } => {
changed_textures.insert(handle);
Self::remove_current_texture_resources(render_resource_context, handle);
}
AssetEvent::Removed { handle } => {
Self::remove_current_texture_resources(render_resource_context, handle);
// if texture was modified and removed in the same update, ignore the modification
// events are ordered so future modification events are ok
changed_textures.remove(handle);
}
}
}
for texture_handle in changed_textures.iter() {
if let Some(texture) = textures.get(*texture_handle) {
let texture_descriptor: TextureDescriptor = texture.into();
let texture_resource = render_resource_context.create_texture(texture_descriptor);
let sampler_resource = render_resource_context.create_sampler(&texture.sampler);
render_resource_context.set_asset_resource(
texture_handle,
RenderResourceId::Texture(texture_resource),
TEXTURE_ASSET_INDEX,
);
render_resource_context.set_asset_resource(
texture_handle,
RenderResourceId::Sampler(sampler_resource),
SAMPLER_ASSET_INDEX,
);
}
}
}
fn remove_current_texture_resources(
render_resource_context: &dyn RenderResourceContext,
handle: &Handle<Texture>,
) {
if let Some(RenderResourceId::Texture(resource)) =
render_resource_context.get_asset_resource(handle, TEXTURE_ASSET_INDEX)
{
render_resource_context.remove_texture(resource);
render_resource_context.remove_asset_resource(handle, TEXTURE_ASSET_INDEX);
}
if let Some(RenderResourceId::Sampler(resource)) =
render_resource_context.get_asset_resource(handle, SAMPLER_ASSET_INDEX)
{
render_resource_context.remove_sampler(resource);
render_resource_context.remove_asset_resource(handle, SAMPLER_ASSET_INDEX);
}
}
/// Load a byte buffer into a [`Texture`], according to the given `image_type`, using the `image` crate
pub fn from_buffer(buffer: &[u8], image_type: ImageType) -> Result<Texture, TextureError> {
let format = match image_type {
ImageType::MimeType(mime_type) => match mime_type {
"image/png" => Ok(image::ImageFormat::Png),
"image/vnd-ms.dds" => Ok(image::ImageFormat::Dds),
"image/x-targa" => Ok(image::ImageFormat::Tga),
"image/x-tga" => Ok(image::ImageFormat::Tga),
"image/jpeg" => Ok(image::ImageFormat::Jpeg),
"image/bmp" => Ok(image::ImageFormat::Bmp),
"image/x-bmp" => Ok(image::ImageFormat::Bmp),
_ => Err(TextureError::InvalidImageMimeType(mime_type.to_string())),
},
ImageType::Extension(extension) => image::ImageFormat::from_extension(extension)
.ok_or_else(|| TextureError::InvalidImageExtension(extension.to_string())),
}?;
// Load the image in the expected format.
// Some formats like PNG allow for R or RG textures too, so the texture
// format needs to be determined. For RGB textures an alpha channel
// needs to be added, so the image data needs to be converted in those
// cases.
let dyn_img = image::load_from_memory_with_format(buffer, format)?;
Ok(image_to_texture(dyn_img))
}
}
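// A usage sketch for `from_buffer`, assuming `bytes` holds a PNG file
// (hypothetical variable):
//
//     let texture = Texture::from_buffer(&bytes, ImageType::Extension("png"))?;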
impl RenderResource for Option<Handle<Texture>> {
fn resource_type(&self) -> Option<RenderResourceType> {
self.as_ref().map(|_texture| RenderResourceType::Texture)
}
fn write_buffer_bytes(&self, _buffer: &mut [u8]) {}
fn buffer_byte_len(&self) -> Option<usize> {
None
}
fn texture(&self) -> Option<&Handle<Texture>> {
self.as_ref()
}
}
impl RenderResource for Handle<Texture> {
fn resource_type(&self) -> Option<RenderResourceType> {
Some(RenderResourceType::Texture)
}
fn write_buffer_bytes(&self, _buffer: &mut [u8]) {}
fn buffer_byte_len(&self) -> Option<usize> {
None
}
fn texture(&self) -> Option<&Handle<Texture>> {
Some(self)
}
}
/// An error that occurs when loading a texture
#[derive(Error, Debug)]
pub enum TextureError {
#[error("invalid image mime type")]
InvalidImageMimeType(String),
#[error("invalid image extension")]
InvalidImageExtension(String),
#[error("failed to load an image")]
ImageError(#[from] image::ImageError),
}
/// Type of a raw image buffer
pub enum ImageType<'a> {
/// Mime type of an image, for example `"image/png"`
MimeType(&'a str),
/// Extension of an image file, for example `"png"`
Extension(&'a str),
}
| {
changed_textures.insert(handle);
} |
types.py | from Compiler.program import Tape
from Compiler.exceptions import *
from Compiler.instructions import *
from Compiler.instructions_base import *
from .floatingpoint import two_power
from . import comparison, floatingpoint
import math
import itertools
from . import util
import operator
from functools import reduce
class ClientMessageType:
""" Enum to define type of message sent to external client. Each may be array of length n."""
# No client message type to be sent, for backwards compatibility - virtual machine relies on this value
NoType = 0
# 3 x sint x n
TripleShares = 1
# 1 x cint x n
ClearModpInt = 2
# 1 x regint x n
Int32 = 3
# 1 x cint (fixed point left shifted by precision) x n
ClearModpFix = 4
class MPCThread(object):
def __init__(self, target, name, args = [], runtime_arg = None):
""" Create a thread from a callable object. """
if not callable(target):
raise CompilerError('Target %s for thread %s is not callable' % (target,name))
self.name = name
self.tape = Tape(program.name + '-' + name, program)
self.target = target
self.args = args
self.runtime_arg = runtime_arg
self.running = 0
def start(self, runtime_arg = None):
self.running += 1
program.start_thread(self, runtime_arg or self.runtime_arg)
def join(self):
if not self.running:
raise CompilerError('Thread %s is not running' % self.name)
self.running -= 1
program.stop_thread(self)
def vectorize(operation):
def vectorized_operation(self, *args, **kwargs):
if len(args):
if (isinstance(args[0], Tape.Register) or isinstance(args[0], sfloat)) \
and args[0].size != self.size:
raise CompilerError('Different vector sizes of operands')
set_global_vector_size(self.size)
res = operation(self, *args, **kwargs)
reset_global_vector_size()
return res
return vectorized_operation
def vectorize_max(operation):
def vectorized_operation(self, *args, **kwargs):
size = self.size
for arg in args:
try:
size = max(size, arg.size)
except AttributeError:
pass
set_global_vector_size(size)
res = operation(self, *args, **kwargs)
reset_global_vector_size()
return res
return vectorized_operation
def vectorized_classmethod(function):
def vectorized_function(cls, *args, **kwargs):
size = None
if 'size' in kwargs:
size = kwargs.pop('size')
if size:
set_global_vector_size(size)
res = function(cls, *args, **kwargs)
reset_global_vector_size()
else:
res = function(cls, *args, **kwargs)
return res
return classmethod(vectorized_function)
def vectorize_init(function):
def vectorized_init(*args, **kwargs):
size = None
if len(args) > 1 and (isinstance(args[1], Tape.Register) or \
isinstance(args[1], sfloat)):
size = args[1].size
if 'size' in kwargs and kwargs['size'] is not None \
and kwargs['size'] != size:
raise CompilerError('Mismatch in vector size')
if 'size' in kwargs and kwargs['size']:
size = kwargs['size']
if size is not None:
set_global_vector_size(size)
res = function(*args, **kwargs)
reset_global_vector_size()
else:
res = function(*args, **kwargs)
return res
return vectorized_init
def set_instruction_type(operation):
def instruction_typed_operation(self, *args, **kwargs):
set_global_instruction_type(self.instruction_type)
res = operation(self, *args, **kwargs)
reset_global_instruction_type()
return res
return instruction_typed_operation
def read_mem_value(operation):
def read_mem_operation(self, *args, **kwargs):
if len(args) > 0 and isinstance(args[0], MemValue):
args = (args[0].read(),) + args[1:]
return operation(self, *args, **kwargs)
return read_mem_operation
class _number(object):
def square(self):
return self * self
def __add__(self, other):
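# note: 'is 0' (and 'is 1' below) are identity checks against the Python
# int literals, so only compile-time constants short-circuit here; secret
# values, whose '==' returns a secret bit, still go through self.add/mul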
if other is 0:
return self
else:
return self.add(other)
def __mul__(self, other):
if other is 0:
return 0
elif other is 1:
return self
else:
return self.mul(other)
__radd__ = __add__
__rmul__ = __mul__
@vectorize
def __pow__(self, exp):
if isinstance(exp, int) and exp >= 0:
if exp == 0:
return self.__class__(1)
exp = bin(exp)[3:]
res = self
for i in exp:
res = res.square()
if i == '1':
res *= self
return res
else:
return NotImplemented
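# Worked example of the square-and-multiply loop above: for exp = 13,
# bin(13)[3:] == '101' (the leading 1 is covered by res = self), so the
# loop computes ((self^2 * self)^2)^2 * self == self^13 using three
# squarings and two multiplications.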
def mul_no_reduce(self, other, res_params=None):
return self * other
def reduce_after_mul(self):
return self
def pow2(self, bit_length=None, security=None):
return 2**self
def min(self, other):
return (self < other).if_else(self, other)
def max(self, other):
return (self < other).if_else(other, self)
class _int(object):
def if_else(self, a, b):
if hasattr(a, 'for_mux'):
f, a, b = a.for_mux(b)
else:
f = lambda x: x
return f(self * (a - b) + b)
def cond_swap(self, a, b):
prod = self * (a - b)
return a - prod, b + prod
def bit_xor(self, other):
return self + other - 2 * self * other
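# Sanity check of the arithmetic identities above, for a selector bit s
# and values a, b:
#   if_else:   s*(a - b) + b  ==  a if s == 1 else b
#   cond_swap: subtracting/adding s*(a - b) swaps a and b iff s == 1
#   bit_xor:   a + b - 2*a*b  ==  a XOR b for a, b in {0, 1}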
class _gf2n(object):
def if_else(self, a, b):
return b ^ self * self.hard_conv(a ^ b)
def cond_swap(self, a, b, t=None):
prod = self * self.hard_conv(a ^ b)
res = a ^ prod, b ^ prod
if t is None:
return res
else:
return tuple(t.conv(r) for r in res)
def bit_xor(self, other):
return self ^ other
class _structure(object):
MemValue = classmethod(lambda cls, value: MemValue(cls.conv(value)))
@classmethod
def Array(cls, size, *args, **kwargs):
return Array(size, cls, *args, **kwargs)
@classmethod
def Matrix(cls, rows, columns, *args, **kwargs):
return Matrix(rows, columns, cls, *args, **kwargs)
@classmethod
def row_matrix_mul(cls, row, matrix, res_params=None):
return sum(row[k].mul_no_reduce(matrix[k].get_vector(),
res_params) \
for k in range(len(row))).reduce_after_mul()
class _register(Tape.Register, _number, _structure):
@staticmethod
def n_elements():
return 1
@vectorized_classmethod
def conv(cls, val):
if isinstance(val, MemValue):
val = val.read()
if isinstance(val, cls):
return val
elif not isinstance(val, _register):
try:
return type(val)(cls.conv(v) for v in val)
except TypeError:
pass
except CompilerError:
pass
return cls(val)
@vectorized_classmethod
@read_mem_value
def hard_conv(cls, val):
if type(val) == cls:
return val
elif not isinstance(val, _register):
try:
return val.hard_conv_me(cls)
except AttributeError:
try:
return type(val)(cls.hard_conv(v) for v in val)
except TypeError:
pass
return cls(val)
@vectorized_classmethod
@set_instruction_type
def _load_mem(cls, address, direct_inst, indirect_inst):
res = cls()
if isinstance(address, _register):
indirect_inst(res, cls._expand_address(address,
get_global_vector_size()))
else:
direct_inst(res, address)
return res
@staticmethod
def _expand_address(address, size):
address = regint.conv(address)
if size > 1 and address.size == 1:
res = regint(size=size)
for i in range(size):
movint(res[i], address + regint(i, size=1))
return res
else:
return address
@set_instruction_type
def _store_in_mem(self, address, direct_inst, indirect_inst):
if isinstance(address, _register):
indirect_inst(self, self._expand_address(address, self.size))
else:
direct_inst(self, address)
@classmethod
def prep_res(cls, other):
return cls()
@staticmethod
def bit_compose(bits):
return sum(b << i for i,b in enumerate(bits))
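# e.g. bit_compose([1, 0, 1]) == 1*1 + 0*2 + 1*4 == 5 (bits little-endian)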
@classmethod
def malloc(cls, size):
return program.malloc(size, cls)
@set_instruction_type
def __init__(self, reg_type, val, size):
if isinstance(val, (tuple, list)):
size = len(val)
super(_register, self).__init__(reg_type, program.curr_tape, size=size)
if isinstance(val, int):
self.load_int(val)
elif isinstance(val, (tuple, list)):
for i, x in enumerate(val):
self.mov(self[i], type(self)(x, size=1))
elif val is not None:
self.load_other(val)
def sizeof(self):
return self.size
def extend(self, n):
return self
def expand_to_vector(self, size=None):
if size is None:
size = get_global_vector_size()
if self.size == size:
return self
assert self.size == 1
res = type(self)(size=size)
for i in range(size):
movs(res[i], self)
return res
class _clear(_register):
__slots__ = []
mov = staticmethod(movc)
@vectorized_classmethod
@set_instruction_type
def protect_memory(cls, start, end):
program.curr_tape.start_new_basicblock(name='protect-memory')
protectmemc(regint(start), regint(end))
@set_instruction_type
@vectorize
def load_other(self, val):
if isinstance(val, type(self)):
movc(self, val)
else:
self.convert_from(val)
@vectorize
@read_mem_value
def convert_from(self, val):
if not isinstance(val, regint):
val = regint(val)
convint(self, val)
@set_instruction_type
@vectorize
def print_reg(self, comment=''):
print_reg(self, comment)
@set_instruction_type
@vectorize
def print_reg_plain(self):
print_reg_plain(self)
@set_instruction_type
@vectorize
def raw_output(self):
raw_output(self)
@set_instruction_type
@read_mem_value
@vectorize
def clear_op(self, other, c_inst, ci_inst, reverse=False):
cls = self.__class__
res = self.prep_res(other)
if isinstance(other, cls):
c_inst(res, self, other)
elif isinstance(other, int):
if self.in_immediate_range(other):
ci_inst(res, self, other)
else:
if reverse:
c_inst(res, cls(other), self)
else:
c_inst(res, self, cls(other))
else:
return NotImplemented
return res
@set_instruction_type
@read_mem_value
@vectorize
def coerce_op(self, other, inst, reverse=False):
cls = self.__class__
res = cls()
if isinstance(other, int):
other = cls(other)
elif not isinstance(other, cls):
return NotImplemented
if reverse:
inst(res, other, self)
else:
inst(res, self, other)
return res
def add(self, other):
return self.clear_op(other, addc, addci)
def mul(self, other):
return self.clear_op(other, mulc, mulci)
def __sub__(self, other):
return self.clear_op(other, subc, subci)
def __rsub__(self, other):
return self.clear_op(other, subc, subcfi, True)
def __truediv__(self, other):
return self.clear_op(other, divc, divci)
def __rtruediv__(self, other):
return self.coerce_op(other, divc, True)
def __eq__(self, other):
if isinstance(other, (_clear,int)):
return regint(self) == other
else:
return NotImplemented
def __ne__(self, other):
return 1 - (self == other)
def __and__(self, other):
return self.clear_op(other, andc, andci)
def __xor__(self, other):
return self.clear_op(other, xorc, xorci)
def __or__(self, other):
return self.clear_op(other, orc, orci)
__rand__ = __and__
__rxor__ = __xor__
__ror__ = __or__
def reveal(self):
return self
class cint(_clear, _int):
" Clear mod p integer type. """
__slots__ = []
instruction_type = 'modp'
reg_type = 'c'
@vectorized_classmethod
def read_from_socket(cls, client_id, n=1):
res = [cls() for i in range(n)]
readsocketc(client_id, *res)
if n == 1:
return res[0]
else:
return res
@vectorize
def write_to_socket(self, client_id, message_type=ClientMessageType.NoType):
writesocketc(client_id, message_type, self)
@vectorized_classmethod
def write_to_socket(self, client_id, values, message_type=ClientMessageType.NoType):
""" Send a list of modp integers to socket """
writesocketc(client_id, message_type, *values)
@vectorized_classmethod
def load_mem(cls, address, mem_type=None):
return cls._load_mem(address, ldmc, ldmci)
def store_in_mem(self, address):
self._store_in_mem(address, stmc, stmci)
@staticmethod
def in_immediate_range(value):
return value < 2**31 and value >= -2**31
def __init__(self, val=None, size=None):
super(cint, self).__init__('c', val=val, size=size)
@vectorize
def load_int(self, val):
if val:
# +1 for sign
program.curr_tape.require_bit_length(1 + int(math.ceil(math.log(abs(val), 2))))
if self.in_immediate_range(val):
ldi(self, val)
else:
max = 2**31 - 1
sign = abs(val) // val
val = abs(val)
chunks = []
while val:
mod = val % max
val = (val - mod) // max
chunks.append(mod)
sum = cint(sign * chunks.pop())
for i,chunk in enumerate(reversed(chunks)):
sum *= max
if i == len(chunks) - 1:
addci(self, sum, sign * chunk)
elif chunk:
sum += sign * chunk
def to_regint(self, n_bits=None, dest=None):
dest = regint() if dest is None else dest
convmodp(dest, self, bitlength=n_bits)
return dest
def __mod__(self, other):
return self.clear_op(other, modc, modci)
def __rmod__(self, other):
return self.coerce_op(other, modc, True)
def __lt__(self, other):
if isinstance(other, (type(self),int)):
return regint(self) < other
else:
return NotImplemented
def __gt__(self, other):
if isinstance(other, (type(self),int)):
return regint(self) > other
else:
return NotImplemented
def __le__(self, other):
return 1 - (self > other)
def __ge__(self, other):
return 1 - (self < other)
@vectorize
def __eq__(self, other):
if not isinstance(other, (_clear, int)):
return NotImplemented
res = 1
remaining = program.bit_length
while remaining > 0:
if isinstance(other, cint):
o = other.to_regint(min(remaining, 64))
else:
o = other % 2 ** 64
res *= (self.to_regint(min(remaining, 64)) == o)
self >>= 64
other >>= 64
remaining -= 64
return res
def __lshift__(self, other):
return self.clear_op(other, shlc, shlci)
def __rshift__(self, other):
return self.clear_op(other, shrc, shrci)
def __neg__(self):
return 0 - self
def __abs__(self):
return (self >= 0).if_else(self, -self)
@vectorize
def __invert__(self):
res = cint()
notc(res, self, program.bit_length)
return res
def __rpow__(self, base):
if base == 2:
return 1 << self
else:
return NotImplemented
@vectorize
def __rlshift__(self, other):
return cint(other) << self
@vectorize
def __rrshift__(self, other):
return cint(other) >> self
@read_mem_value
def mod2m(self, other, bit_length=None, signed=None):
return self % 2**other
@read_mem_value
def right_shift(self, other, bit_length=None):
return self >> other
@read_mem_value
def greater_than(self, other, bit_length=None):
return self > other
def bit_decompose(self, bit_length=None):
if bit_length == 0:
return []
bit_length = bit_length or program.bit_length
return floatingpoint.bits(self, bit_length)
def legendre(self):
res = cint()
legendrec(res, self)
return res
def digest(self, num_bytes):
res = cint()
digestc(res, self, num_bytes)
return res
def print_if(self, string):
cond_print_str(self, string)
class cgf2n(_clear, _gf2n):
__slots__ = []
instruction_type = 'gf2n'
reg_type = 'cg'
@classmethod
def bit_compose(cls, bits, step=None):
size = bits[0].size
res = cls(size=size)
vgbitcom(size, res, step or 1, *bits)
return res
@vectorized_classmethod
def load_mem(cls, address, mem_type=None):
return cls._load_mem(address, gldmc, gldmci)
def store_in_mem(self, address):
self._store_in_mem(address, gstmc, gstmci)
@staticmethod
def in_immediate_range(value):
return value < 2**32 and value >= 0
def __init__(self, val=None, size=None):
super(cgf2n, self).__init__('cg', val=val, size=size)
@vectorize
def load_int(self, val):
if val < 0:
raise CompilerError('Negative GF2n immediate')
if self.in_immediate_range(val):
gldi(self, val)
else:
chunks = []
while val:
mod = val % 2**32
val >>= 32
chunks.append(mod)
sum = cgf2n(chunks.pop())
for i,chunk in enumerate(reversed(chunks)):
sum <<= 32
if i == len(chunks) - 1:
gaddci(self, sum, chunk)
elif chunk:
sum += chunk
def __mul__(self, other):
return super(cgf2n, self).__mul__(other)
def __neg__(self):
return self
@vectorize
def __invert__(self):
res = cgf2n()
gnotc(res, self)
return res
@vectorize
def __lshift__(self, other):
if isinstance(other, int):
res = cgf2n()
gshlci(res, self, other)
return res
else:
return NotImplemented
@vectorize
def __rshift__(self, other):
if isinstance(other, int):
res = cgf2n()
gshrci(res, self, other)
return res
else:
return NotImplemented
@vectorize
def bit_decompose(self, bit_length=None, step=None):
bit_length = bit_length or program.galois_length
step = step or 1
res = [type(self)() for _ in range(bit_length // step)]
gbitdec(self, step, *res)
return res
class regint(_register, _int):
__slots__ = []
reg_type = 'ci'
instruction_type = 'modp'
mov = staticmethod(movint)
@classmethod
def protect_memory(cls, start, end):
program.curr_tape.start_new_basicblock(name='protect-memory')
protectmemint(regint(start), regint(end))
@vectorized_classmethod
def load_mem(cls, address, mem_type=None):
return cls._load_mem(address, ldmint, ldminti)
def store_in_mem(self, address):
self._store_in_mem(address, stmint, stminti)
@vectorized_classmethod
def pop(cls):
res = cls()
popint(res)
return res
@vectorized_classmethod
def push(cls, value):
pushint(cls.conv(value))
@vectorized_classmethod
def get_random(cls, bit_length):
""" Public insecure randomness """
if isinstance(bit_length, int):
bit_length = regint(bit_length)
res = cls()
rand(res, bit_length)
return res
@vectorized_classmethod
def read_from_socket(cls, client_id, n=1):
""" Receive n register values from socket """
res = [cls() for i in range(n)]
readsocketint(client_id, *res)
if n == 1:
return res[0]
else:
return res
@vectorized_classmethod
def read_client_public_key(cls, client_id):
""" Receive 8 register values from socket containing client public key."""
res = [cls() for i in range(8)]
readclientpublickey(client_id, *res)
return res
@vectorized_classmethod
def init_secure_socket(cls, client_id, w1, w2, w3, w4, w5, w6, w7, w8):
""" Use 8 register values containing client public key."""
initsecuresocket(client_id, w1, w2, w3, w4, w5, w6, w7, w8)
@vectorized_classmethod
def resp_secure_socket(cls, client_id, w1, w2, w3, w4, w5, w6, w7, w8):
""" Receive 8 register values from socket containing client public key."""
respsecuresocket(client_id, w1, w2, w3, w4, w5, w6, w7, w8)
@vectorize
def write_to_socket(self, client_id, message_type=ClientMessageType.NoType):
writesocketint(client_id, message_type, self)
@vectorized_classmethod
def write_to_socket(self, client_id, values, message_type=ClientMessageType.NoType):
""" Send a list of integers to socket """
writesocketint(client_id, message_type, *values)
@vectorize_init
def __init__(self, val=None, size=None):
super(regint, self).__init__(self.reg_type, val=val, size=size)
def load_int(self, val):
if cint.in_immediate_range(val):
ldint(self, val)
else:
lower = val % 2**32
upper = val >> 32
if lower >= 2**31:
lower -= 2**32
upper += 1
addint(self, regint(upper) * regint(2**16)**2, regint(lower))
@read_mem_value
def load_other(self, val):
if isinstance(val, cgf2n):
gconvgf2n(self, val)
elif isinstance(val, regint):
addint(self, val, regint(0))
else:
try:
val.to_regint(dest=self)
except AttributeError:
raise CompilerError("Cannot convert '%s' to integer" % \
type(val))
@vectorize
@read_mem_value
def int_op(self, other, inst, reverse=False):
try:
other = self.conv(other)
except:
return NotImplemented
res = regint()
if reverse:
inst(res, other, self)
else:
inst(res, self, other)
return res
def add(self, other):
return self.int_op(other, addint)
def __sub__(self, other):
return self.int_op(other, subint)
def __rsub__(self, other):
return self.int_op(other, subint, True)
def mul(self, other):
return self.int_op(other, mulint)
def __neg__(self):
return 0 - self
def __floordiv__(self, other):
return self.int_op(other, divint)
def __rfloordiv__(self, other):
return self.int_op(other, divint, True)
__truediv__ = __floordiv__
__rtruediv__ = __rfloordiv__
def __mod__(self, other):
return self - (self / other) * other
def __rmod__(self, other):
return regint(other) % self
def __rpow__(self, other):
return other**cint(self)
def __eq__(self, other):
return self.int_op(other, eqc)
def __ne__(self, other):
return 1 - (self == other)
def __lt__(self, other):
return self.int_op(other, ltc)
def __gt__(self, other):
return self.int_op(other, gtc)
def __le__(self, other):
return 1 - (self > other)
def __ge__(self, other):
return 1 - (self < other)
def __lshift__(self, other):
if isinstance(other, int):
return self * 2**other
else:
return regint(cint(self) << other)
def __rshift__(self, other):
if isinstance(other, int):
return self / 2**other
else:
return regint(cint(self) >> other)
def __rlshift__(self, other):
return regint(other << cint(self))
def __rrshift__(self, other):
return regint(other >> cint(self))
def __and__(self, other):
return regint(other & cint(self))
def __or__(self, other):
return regint(other | cint(self))
def __xor__(self, other):
return regint(other ^ cint(self))
__rand__ = __and__
__ror__ = __or__
__rxor__ = __xor__
def mod2m(self, *args, **kwargs):
return cint(self).mod2m(*args, **kwargs)
@vectorize
def bit_decompose(self, bit_length=None):
bit_length = bit_length or min(64, program.bit_length)
if bit_length > 64:
raise CompilerError('too many bits demanded')
res = [regint() for i in range(bit_length)]
bitdecint(self, *res)
return res
@staticmethod
def bit_compose(bits):
two = regint(2)
res = 0
for bit in reversed(bits):
res *= two
res += bit
return res
def reveal(self):
return self
def print_reg_plain(self):
print_int(self)
def print_if(self, string):
cint(self).print_if(string)
class localint(object):
""" Local integer that must prevented from leaking into the secure
computation. Uses regint internally. """
def __init__(self, value=None):
self._v = regint(value)
self.size = 1
def output(self):
self._v.print_reg_plain()
__lt__ = lambda self, other: localint(self._v < other)
__le__ = lambda self, other: localint(self._v <= other)
__gt__ = lambda self, other: localint(self._v > other)
__ge__ = lambda self, other: localint(self._v >= other)
__eq__ = lambda self, other: localint(self._v == other)
__ne__ = lambda self, other: localint(self._v != other)
class _secret(_register):
__slots__ = []
mov = staticmethod(movs)
PreOR = staticmethod(lambda l: floatingpoint.PreORC(l))
PreOp = staticmethod(lambda op, l: floatingpoint.PreOpL(op, l))
@vectorized_classmethod
@set_instruction_type
def protect_memory(cls, start, end):
program.curr_tape.start_new_basicblock(name='protect-memory')
protectmems(regint(start), regint(end))
@vectorized_classmethod
@set_instruction_type
def get_input_from(cls, player):
""" Secret input """
res = cls()
asm_input(res, player)
return res
@vectorized_classmethod
@set_instruction_type
def get_random_triple(cls):
""" Secret random triple according to security model """
res = (cls(), cls(), cls())
triple(*res)
return res
@vectorized_classmethod
@set_instruction_type
def get_random_bit(cls):
""" Secret random bit according to security model """
res = cls()
bit(res)
return res
@vectorized_classmethod
@set_instruction_type
def get_random_square(cls):
""" Secret random square according to security model """
res = (cls(), cls())
square(*res)
return res
@vectorized_classmethod
@set_instruction_type
def get_random_inverse(cls):
""" Secret random inverse according to security model """
res = (cls(), cls())
inverse(*res)
return res
@vectorized_classmethod
@set_instruction_type
def get_random_input_mask_for(cls, player):
res = cls()
inputmask(res, player)
return res
@classmethod
@set_instruction_type
def dot_product(cls, x, y):
x = list(x)
set_global_vector_size(x[0].size)
res = cls()
dotprods(res, x, y)
reset_global_vector_size()
return res
@classmethod
@set_instruction_type
def row_matrix_mul(cls, row, matrix, res_params=None):
assert len(row) == len(matrix)
size = len(matrix[0])
res = cls(size=size)
dotprods(*sum(([res[j], row, [matrix[k][j] for k in range(len(row))]]
for j in range(size)), []))
return res
@classmethod
@set_instruction_type
def matrix_mul(cls, A, B, n, res_params=None):
assert len(A) % n == 0
assert len(B) % n == 0
size = len(A) * len(B) // n**2
res = cls(size=size)
n_rows = len(A) // n
n_cols = len(B) // n
dotprods(*sum(([res[j], [A[j // n_cols * n + k] for k in range(n)],
[B[k * n_cols + j % n_cols] for k in range(n)]]
for j in range(size)), []))
return res
def __init__(self, reg_type, val=None, size=None):
if isinstance(val, self.clear_type):
size = val.size
super(_secret, self).__init__(reg_type, val=val, size=size)
@set_instruction_type
@vectorize
def load_int(self, val):
if self.clear_type.in_immediate_range(val):
ldsi(self, val)
else:
self.load_clear(self.clear_type(val))
@vectorize
def load_clear(self, val):
addm(self, self.__class__(0), val)
@set_instruction_type
@read_mem_value
@vectorize
def load_other(self, val):
if isinstance(val, self.clear_type):
self.load_clear(val)
elif isinstance(val, type(self)):
movs(self, val)
else:
self.load_clear(self.clear_type(val))
def _new_by_number(self, i):
res = type(self)(size=1)
res.i = i
res.program = self.program
return res
@set_instruction_type
@read_mem_value
@vectorize
def secret_op(self, other, s_inst, m_inst, si_inst, reverse=False):
cls = self.__class__
res = self.prep_res(other)
if isinstance(other, regint):
other = res.clear_type(other)
if isinstance(other, cls):
s_inst(res, self, other)
elif isinstance(other, res.clear_type):
if reverse:
m_inst(res, other, self)
else:
m_inst(res, self, other)
elif isinstance(other, int):
if self.clear_type.in_immediate_range(other):
si_inst(res, self, other)
else:
if reverse:
m_inst(res, res.clear_type(other), self)
else:
m_inst(res, self, res.clear_type(other))
else:
return NotImplemented
return res
def add(self, other):
return self.secret_op(other, adds, addm, addsi)
@set_instruction_type
def mul(self, other):
if isinstance(other, _secret) and max(self.size, other.size) > 1 \
and min(self.size, other.size) == 1:
x, y = (other, self) if self.size < other.size else (self, other)
res = type(self)(size=x.size)
mulrs(res, x, y)
return res
return self.secret_op(other, muls, mulm, mulsi)
def __sub__(self, other):
return self.secret_op(other, subs, subml, subsi)
def __rsub__(self, other):
return self.secret_op(other, subs, submr, subsfi, True)
@vectorize
def __truediv__(self, other):
return self * (self.clear_type(1) / other)
@vectorize
def __rtruediv__(self, other):
a,b = self.get_random_inverse()
return other * a / (a * self).reveal()
@set_instruction_type
@vectorize
def square(self):
res = self.__class__()
sqrs(res, self)
return res
@set_instruction_type
@vectorize
def reveal(self):
res = self.clear_type()
asm_open(res, self)
return res
@set_instruction_type
def reveal_to(self, player):
masked = self.__class__()
startprivateoutput(masked, self, player)
stopprivateoutput(masked.reveal(), player)
class sint(_secret, _int):
" Shared mod p integer type. """
__slots__ = []
instruction_type = 'modp'
clear_type = cint
reg_type = 's'
PreOp = staticmethod(floatingpoint.PreOpL)
PreOR = staticmethod(floatingpoint.PreOR)
get_type = staticmethod(lambda n: sint)
@vectorized_classmethod
def get_random_int(cls, bits):
""" Secret random n-bit number according to security model """
res = sint()
comparison.PRandInt(res, bits)
return res
@vectorized_classmethod
def get_input_from(cls, player):
""" Secret input """
res = cls()
inputmixed('int', res, player)
return res
@classmethod
def get_raw_input_from(cls, player):
res = cls()
startinput(player, 1)
stopinput(player, res)
return res
@classmethod
def receive_from_client(cls, n, client_id, message_type=ClientMessageType.NoType):
""" Securely obtain shares of n values input by a client """
# send shares of a triple to client
triples = list(itertools.chain(*(sint.get_random_triple() for i in range(n))))
sint.write_shares_to_socket(client_id, triples, message_type)
received = cint.read_from_socket(client_id, n)
y = [0] * n
for i in range(n):
y[i] = received[i] - triples[i * 3]
return y
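# Protocol sketch for receive_from_client: each value y_i arrives masked in
# the clear as received[i] = y_i + a_i, where a_i is the first element of a
# random triple whose shares were previously sent to the client; every
# party subtracts its share of a_i, leaving a secret share of y_i without
# revealing the input.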
@vectorized_classmethod
def read_from_socket(cls, client_id, n=1):
""" Receive n shares and MAC shares from socket """
res = [cls() for i in range(n)]
readsockets(client_id, *res)
if n == 1:
return res[0]
else:
return res
@vectorize
def write_to_socket(self, client_id, message_type=ClientMessageType.NoType):
""" Send share and MAC share to socket """
writesockets(client_id, message_type, self)
@vectorized_classmethod
def write_to_socket(self, client_id, values, message_type=ClientMessageType.NoType):
""" Send a list of shares and MAC shares to socket """
writesockets(client_id, message_type, *values)
@vectorize
def write_share_to_socket(self, client_id, message_type=ClientMessageType.NoType):
""" Send only share to socket """
writesocketshare(client_id, message_type, self)
@vectorized_classmethod
def write_shares_to_socket(cls, client_id, values, message_type=ClientMessageType.NoType, include_macs=False):
""" Send shares of a list of values to a specified client socket """
if include_macs:
writesockets(client_id, message_type, *values)
else:
writesocketshare(client_id, message_type, *values)
@vectorized_classmethod
def load_mem(cls, address, mem_type=None):
return cls._load_mem(address, ldms, ldmsi)
def store_in_mem(self, address):
self._store_in_mem(address, stms, stmsi)
def __init__(self, val=None, size=None):
super(sint, self).__init__('s', val=val, size=size)
@vectorize
def __neg__(self):
return 0 - self
@vectorize
def __abs__(self):
return (self >= 0).if_else(self, -self)
@read_mem_value
@vectorize
def __lt__(self, other, bit_length=None, security=None):
res = sint()
comparison.LTZ(res, self - other,
(bit_length or program.bit_length) + 1,
security or program.security)
return res
@read_mem_value
@vectorize
def __gt__(self, other, bit_length=None, security=None):
res = sint()
comparison.LTZ(res, other - self,
(bit_length or program.bit_length) + 1,
security or program.security)
return res
def | (self, other, bit_length=None, security=None):
return 1 - self.greater_than(other, bit_length, security)
def __ge__(self, other, bit_length=None, security=None):
return 1 - self.less_than(other, bit_length, security)
@read_mem_value
@vectorize
def __eq__(self, other, bit_length=None, security=None):
return floatingpoint.EQZ(self - other, bit_length or program.bit_length,
security or program.security)
def __ne__(self, other, bit_length=None, security=None):
return 1 - self.equal(other, bit_length, security)
less_than = __lt__
greater_than = __gt__
less_equal = __le__
greater_equal = __ge__
equal = __eq__
not_equal = __ne__
@vectorize
def __mod__(self, modulus):
if isinstance(modulus, int):
l = math.log(modulus, 2)
if 2**int(round(l)) == modulus:
return self.mod2m(int(l))
raise NotImplementedError('Modulo only implemented for powers of two.')
@read_mem_value
def mod2m(self, m, bit_length=None, security=None, signed=True):
bit_length = bit_length or program.bit_length
security = security or program.security
if isinstance(m, int):
if m == 0:
return 0
if m >= bit_length:
return self
res = sint()
comparison.Mod2m(res, self, bit_length, m, security, signed)
else:
res, pow2 = floatingpoint.Trunc(self, bit_length, m, security, True)
return res
@vectorize
def __rpow__(self, base):
if base == 2:
return self.pow2()
else:
return NotImplemented
@vectorize
def pow2(self, bit_length=None, security=None):
return floatingpoint.Pow2(self, bit_length or program.bit_length, \
security or program.security)
def __lshift__(self, other, bit_length=None, security=None):
return self * util.pow2_value(other, bit_length, security)
@vectorize
@read_mem_value
def __rshift__(self, other, bit_length=None, security=None):
bit_length = bit_length or program.bit_length
security = security or program.security
if isinstance(other, int):
if other == 0:
return self
res = sint()
comparison.Trunc(res, self, bit_length, other, security, True)
return res
elif isinstance(other, sint):
return floatingpoint.Trunc(self, bit_length, other, security)
else:
return floatingpoint.Trunc(self, bit_length, sint(other), security)
left_shift = __lshift__
right_shift = __rshift__
def __rlshift__(self, other):
return other * 2**self
@vectorize
def __rrshift__(self, other):
return floatingpoint.Trunc(other, program.bit_length, self, program.security)
def bit_decompose(self, bit_length=None, security=None):
if bit_length == 0:
return []
bit_length = bit_length or program.bit_length
security = security or program.security
return floatingpoint.BitDec(self, bit_length, bit_length, security)
def TruncMul(self, other, k, m, kappa=None, nearest=False):
return (self * other).round(k, m, kappa, nearest, signed=True)
def TruncPr(self, k, m, kappa=None, signed=True):
return floatingpoint.TruncPr(self, k, m, kappa, signed=signed)
@vectorize
def round(self, k, m, kappa=None, nearest=False, signed=False):
kappa = kappa or program.security
secret = isinstance(m, sint)
if nearest:
if secret:
raise NotImplementedError()
return comparison.TruncRoundNearest(self, k, m, kappa,
signed=signed)
else:
if secret:
return floatingpoint.Trunc(self, k, m, kappa)
return self.TruncPr(k, m, kappa, signed=signed)
def Norm(self, k, f, kappa=None, simplex_flag=False):
return library.Norm(self, k, f, kappa, simplex_flag)
@vectorize
def int_div(self, other, bit_length=None, security=None):
k = bit_length or program.bit_length
kappa = security or program.security
tmp = library.IntDiv(self, other, k, kappa)
res = type(self)()
comparison.Trunc(res, tmp, 2 * k, k, kappa, True)
return res
@staticmethod
def two_power(n):
return floatingpoint.two_power(n)
class sgf2n(_secret, _gf2n):
__slots__ = []
instruction_type = 'gf2n'
clear_type = cgf2n
reg_type = 'sg'
@classmethod
def get_type(cls, length):
return cls
@classmethod
def get_raw_input_from(cls, player):
res = cls()
gstartinput(player, 1)
gstopinput(player, res)
return res
def add(self, other):
if isinstance(other, sgf2nint):
return NotImplemented
else:
return super(sgf2n, self).add(other)
def mul(self, other):
if isinstance(other, (sgf2nint)):
return NotImplemented
else:
return super(sgf2n, self).mul(other)
@vectorized_classmethod
def load_mem(cls, address, mem_type=None):
return cls._load_mem(address, gldms, gldmsi)
def store_in_mem(self, address):
self._store_in_mem(address, gstms, gstmsi)
def __init__(self, val=None, size=None):
super(sgf2n, self).__init__('sg', val=val, size=size)
def __neg__(self):
return self
@vectorize
def __invert__(self):
return self ^ cgf2n(2**program.galois_length - 1)
def __xor__(self, other):
if other is 0:
return self
else:
return super(sgf2n, self).add(other)
__rxor__ = __xor__
@vectorize
def __and__(self, other):
if isinstance(other, int):
other_bits = [(other >> i) & 1 \
for i in range(program.galois_length)]
else:
other_bits = other.bit_decompose()
self_bits = self.bit_decompose()
return sum((x * y) << i \
for i,(x,y) in enumerate(zip(self_bits, other_bits)))
__rand__ = __and__
@vectorize
def __lshift__(self, other):
return self * cgf2n(1 << other)
@vectorize
def right_shift(self, other, bit_length=None):
bits = self.bit_decompose(bit_length)
return sum(b << i for i,b in enumerate(bits[other:]))
def equal(self, other, bit_length=None, expand=1):
bits = [1 - bit for bit in (self - other).bit_decompose(bit_length)][::expand]
while len(bits) > 1:
bits.insert(0, bits.pop() * bits.pop())
return bits[0]
def not_equal(self, other, bit_length=None):
return 1 - self.equal(other, bit_length)
__eq__ = equal
__ne__ = not_equal
@vectorize
def bit_decompose(self, bit_length=None, step=1):
if bit_length == 0:
return []
bit_length = bit_length or program.galois_length
random_bits = [self.get_random_bit() \
for i in range(0, bit_length, step)]
one = cgf2n(1)
masked = sum([b * (one << (i * step)) for i,b in enumerate(random_bits)], self).reveal()
masked_bits = masked.bit_decompose(bit_length,step=step)
return [m + r for m,r in zip(masked_bits, random_bits)]
@vectorize
def bit_decompose_embedding(self):
random_bits = [self.get_random_bit() \
for i in range(8)]
one = cgf2n(1)
wanted_positions = [0, 5, 10, 15, 20, 25, 30, 35]
masked = sum([b * (one << wanted_positions[i]) for i,b in enumerate(random_bits)], self).reveal()
return [self.clear_type((masked >> wanted_positions[i]) & one) + r for i,r in enumerate(random_bits)]
for t in (sint, sgf2n):
t.bit_type = t
t.basic_type = t
t.default_type = t
class _bitint(object):
bits = None
log_rounds = False
linear_rounds = False
@classmethod
def bit_adder(cls, a, b, carry_in=0, get_carry=False):
a, b = list(a), list(b)
a += [0] * (len(b) - len(a))
b += [0] * (len(a) - len(b))
return cls.bit_adder_selection(a, b, carry_in=carry_in,
get_carry=get_carry)
@classmethod
def bit_adder_selection(cls, a, b, carry_in=0, get_carry=False):
if cls.log_rounds:
return cls.carry_lookahead_adder(a, b, carry_in=carry_in)
elif cls.linear_rounds:
return cls.ripple_carry_adder(a, b, carry_in=carry_in)
else:
return cls.carry_select_adder(a, b, carry_in=carry_in)
@classmethod
def carry_lookahead_adder(cls, a, b, fewer_inv=False, carry_in=0,
get_carry=False):
lower = []
for (ai,bi) in zip(a,b):
if ai is 0 or bi is 0:
lower.append(ai + bi)
a.pop(0)
b.pop(0)
else:
break
d = [cls.half_adder(ai, bi) for (ai,bi) in zip(a,b)]
carry = floatingpoint.carry
if fewer_inv:
pre_op = floatingpoint.PreOpL2
else:
pre_op = floatingpoint.PreOpL
if d:
carries = list(zip(*pre_op(carry, [(0, carry_in)] + d)))[1]
else:
carries = []
res = lower + cls.sum_from_carries(a, b, carries)
if get_carry:
res += [carries[-1]]
return res
@staticmethod
def sum_from_carries(a, b, carries):
return [ai.bit_xor(bi).bit_xor(carry) \
for (ai, bi, carry) in zip(a, b, carries)]
@classmethod
def carry_select_adder(cls, a, b, get_carry=False, carry_in=0):
a += [0] * (len(b) - len(a))
b += [0] * (len(a) - len(b))
n = len(a)
for m in range(100):
if sum(range(m + 1)) + 1 >= n:
break
for k in range(m, -1, -1):
if sum(range(m, k - 1, -1)) + 1 >= n:
break
blocks = list(range(m, k, -1))
blocks.append(n - sum(blocks))
blocks.reverse()
        # print('blocks:', blocks)
if len(blocks) > 1 and blocks[0] > blocks[1]:
raise Exception('block size not increasing:', blocks)
if sum(blocks) != n:
raise Exception('blocks not summing up: %s != %s' % \
(sum(blocks), n))
res = []
carry = carry_in
cin_one = util.long_one(a + b)
for m in blocks:
aa = a[:m]
bb = b[:m]
a = a[m:]
b = b[m:]
cc = [cls.ripple_carry_adder(aa, bb, i) for i in (0, cin_one)]
for i in range(m):
res.append(util.if_else(carry, cc[1][i], cc[0][i]))
carry = util.if_else(carry, cc[1][m], cc[0][m])
if get_carry:
res += [carry]
return res
@classmethod
def ripple_carry_adder(cls, a, b, carry_in=0):
carry = carry_in
res = []
for aa, bb in zip(a, b):
cc, carry = cls.full_adder(aa, bb, carry)
res.append(cc)
res.append(carry)
return res
@staticmethod
def full_adder(a, b, carry):
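        # In the GF(2^n) instantiation, + on single bits acts as XOR, so
        # s = a XOR b, the sum bit is s XOR carry, and the carry-out
        # if_else(s, carry, a) equals majority(a, b, carry).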
s = a + b
return s + carry, util.if_else(s, carry, a)
@staticmethod
def half_adder(a, b):
return a + b, a & b
@staticmethod
def bit_comparator(a, b):
long_one = util.long_one(a + b)
op = lambda y,x,*args: (util.if_else(x[1], x[0], y[0]), \
util.if_else(x[1], long_one, y[1]))
return floatingpoint.KOpL(op, [(bi, ai + bi) for (ai,bi) in zip(a,b)])
@classmethod
def bit_less_than(cls, a, b):
x, not_equal = cls.bit_comparator(a, b)
return util.if_else(not_equal, x, 0)
@staticmethod
def get_highest_different_bits(a, b, index):
diff = [ai + bi for (ai,bi) in reversed(list(zip(a,b)))]
preor = floatingpoint.PreOR(diff, raw=True)
highest_diff = [x - y for (x,y) in reversed(list(zip(preor, [0] + preor)))]
raw = sum(map(operator.mul, highest_diff, (a,b)[index]))
return raw.bit_decompose()[0]
def load_int(self, other):
if -2**(self.n_bits-1) <= other < 2**(self.n_bits-1):
self.bin_type.load_int(self, other + 2**self.n_bits \
if other < 0 else other)
else:
raise CompilerError('Invalid signed %d-bit integer: %d' % \
(self.n_bits, other))
def add(self, other):
if type(other) == self.bin_type:
raise CompilerError('Unclear addition')
a = self.bit_decompose()
b = util.bit_decompose(other, self.n_bits)
return self.compose(self.bit_adder(a, b))
def mul(self, other):
if type(other) == self.bin_type:
raise CompilerError('Unclear multiplication')
self_bits = self.bit_decompose()
if isinstance(other, int):
other_bits = util.bit_decompose(other, self.n_bits)
bit_matrix = [[x * y for y in self_bits] for x in other_bits]
else:
try:
other_bits = other.bit_decompose()
if len(other_bits) == 1:
return type(self)(other_bits[0] * self)
if len(self_bits) != len(other_bits):
raise NotImplementedError('Multiplication of different lengths')
except AttributeError:
pass
try:
other = self.bin_type(other)
except CompilerError:
return NotImplemented
products = [x * other for x in self_bits]
bit_matrix = [util.bit_decompose(x, self.n_bits) for x in products]
return self.compose(self.wallace_tree_from_matrix(bit_matrix, False))
@classmethod
def wallace_tree_from_matrix(cls, bit_matrix, get_carry=True):
columns = [[_f for _f in (bit_matrix[j][i-j] \
for j in range(min(len(bit_matrix), i + 1))) if _f] \
for i in range(len(bit_matrix[0]))]
return cls.wallace_tree_from_columns(columns, get_carry)
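    # Wallace-tree reduction: compress every column of partial-product bits
    # with full adders (three bits -> sum + carry) and half adders (two bits
    # -> sum + carry) until all columns have height at most two, then finish
    # with a single bit adder.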
@classmethod
def wallace_tree_from_columns(cls, columns, get_carry=True):
self = cls
while max(len(c) for c in columns) > 2:
new_columns = [[] for i in range(len(columns) + 1)]
for i,col in enumerate(columns):
while len(col) > 2:
s, carry = self.full_adder(*(col.pop() for i in range(3)))
new_columns[i].append(s)
new_columns[i+1].append(carry)
if len(col) == 2:
s, carry = self.half_adder(*(col.pop() for i in range(2)))
new_columns[i].append(s)
new_columns[i+1].append(carry)
else:
new_columns[i].extend(col)
if get_carry:
columns = new_columns
else:
columns = new_columns[:-1]
for col in columns:
col.extend([0] * (2 - len(col)))
return self.bit_adder(*list(zip(*columns)))
@classmethod
def wallace_tree(cls, rows):
return cls.wallace_tree_from_columns([list(x) for x in zip(*rows)])
def __sub__(self, other):
if type(other) == sgf2n:
raise CompilerError('Unclear subtraction')
a = self.bit_decompose()
b = util.bit_decompose(other, self.n_bits)
d = [(1 + ai + bi, (1 - ai) * bi) for (ai,bi) in zip(a,b)]
borrow = lambda y,x,*args: \
(x[0] * y[0], 1 - (1 - x[1]) * (1 - x[0] * y[1]))
borrows = (0,) + list(zip(*floatingpoint.PreOpL(borrow, d)))[1]
return self.compose(ai + bi + borrow \
for (ai,bi,borrow) in zip(a,b,borrows))
def __rsub__(self, other):
raise NotImplementedError()
def __truediv__(self, other):
raise NotImplementedError()
    def __rtruediv__(self, other):
raise NotImplementedError()
def __lshift__(self, other):
return self.compose(([0] * other + self.bit_decompose())[:self.n_bits])
def __rshift__(self, other):
return self.compose(self.bit_decompose()[other:])
def bit_decompose(self, n_bits=None, *args):
if self.bits is None:
self.bits = self.force_bit_decompose(self.n_bits)
if n_bits is None:
return self.bits[:]
else:
return self.bits[:n_bits] + [self.fill_bit()] * (n_bits - self.n_bits)
def fill_bit(self):
return self.bits[-1]
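    # Swapping the operands' sign bits reduces signed comparison to the
    # unsigned bit comparison: equal signs make the swap a no-op, while for
    # different signs it makes the negative operand compare as smaller.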
@staticmethod
def prep_comparison(a, b):
a[-1], b[-1] = b[-1], a[-1]
def comparison(self, other, const_rounds=False, index=None):
a = self.bit_decompose()
b = util.bit_decompose(other, self.n_bits)
self.prep_comparison(a, b)
if const_rounds:
return self.get_highest_different_bits(a, b, index)
else:
return self.bit_comparator(a, b)
def __lt__(self, other):
if program.options.comparison == 'log':
x, not_equal = self.comparison(other)
return util.if_else(not_equal, x, 0)
else:
return self.comparison(other, True, 1)
def __le__(self, other):
if program.options.comparison == 'log':
x, not_equal = self.comparison(other)
return util.if_else(not_equal, x, 1)
else:
return 1 - self.comparison(other, True, 0)
def __ge__(self, other):
return 1 - (self < other)
def __gt__(self, other):
return 1 - (self <= other)
def __eq__(self, other):
diff = self ^ other
diff_bits = [1 - x for x in diff.bit_decompose()]
return floatingpoint.KMul(diff_bits)
def __ne__(self, other):
return 1 - (self == other)
def __neg__(self):
return 1 + self.compose(1 ^ b for b in self.bit_decompose())
def __abs__(self):
return util.if_else(self.bit_decompose()[-1], -self, self)
less_than = lambda self, other, *args, **kwargs: self < other
greater_than = lambda self, other, *args, **kwargs: self > other
less_equal = lambda self, other, *args, **kwargs: self <= other
greater_equal = lambda self, other, *args, **kwargs: self >= other
equal = lambda self, other, *args, **kwargs: self == other
not_equal = lambda self, other, *args, **kwargs: self != other
class intbitint(_bitint, sint):
@staticmethod
def full_adder(a, b, carry):
s = a.bit_xor(b)
return s.bit_xor(carry), util.if_else(s, carry, a)
@staticmethod
def half_adder(a, b):
carry = a * b
return a + b - 2 * carry, carry
@staticmethod
def sum_from_carries(a, b, carries):
return [a[i] + b[i] + carries[i] - 2 * carries[i + 1] \
for i in range(len(a))]
@classmethod
def bit_adder_selection(cls, a, b, carry_in=0, get_carry=False):
if cls.linear_rounds:
return cls.ripple_carry_adder(a, b, carry_in=carry_in)
# experimental cut-off with dead code elimination
elif len(a) < 122 or cls.log_rounds:
return cls.carry_lookahead_adder(a, b, carry_in=carry_in,
get_carry=get_carry)
else:
return cls.carry_select_adder(a, b, carry_in=carry_in)
class sgf2nint(_bitint, sgf2n):
bin_type = sgf2n
@classmethod
def compose(cls, bits):
bits = list(bits)
if len(bits) > cls.n_bits:
raise CompilerError('Too many bits')
res = cls()
res.bits = bits + [0] * (cls.n_bits - len(bits))
gmovs(res, sum(b << i for i,b in enumerate(bits)))
return res
def load_other(self, other):
if isinstance(other, sgf2nint):
gmovs(self, self.compose(other.bit_decompose(self.n_bits)))
elif isinstance(other, sgf2n):
gmovs(self, other)
else:
gaddm(self, sgf2n(0), cgf2n(other))
def force_bit_decompose(self, n_bits=None):
return sgf2n(self).bit_decompose(n_bits)
class sgf2nuint(sgf2nint):
def load_int(self, other):
if 0 <= other < 2**self.n_bits:
sgf2n.load_int(self, other)
else:
raise CompilerError('Invalid unsigned %d-bit integer: %d' % \
(self.n_bits, other))
def fill_bit(self):
return 0
@staticmethod
def prep_comparison(a, b):
pass
class sgf2nuint32(sgf2nuint):
n_bits = 32
class sgf2nint32(sgf2nint):
n_bits = 32
def get_sgf2nint(n):
class sgf2nint_spec(sgf2nint):
n_bits = n
    #sgf2nint_spec.__name__ = 'sgf2nint' + str(n)
return sgf2nint_spec
def get_sgf2nuint(n):
    class sgf2nuint_spec(sgf2nuint):
n_bits = n
#sgf2nuint_spec.__name__ = 'sgf2nuint' + str(n)
return sgf2nuint_spec
class sgf2nfloat(sgf2n):
@classmethod
def set_precision(cls, vlen, plen):
cls.vlen = vlen
cls.plen = plen
class v_type(sgf2nuint):
n_bits = 2 * vlen + 1
class p_type(sgf2nint):
n_bits = plen
class pdiff_type(sgf2nuint):
n_bits = plen
cls.v_type = v_type
cls.p_type = p_type
cls.pdiff_type = pdiff_type
def __init__(self, val, p=None, z=None, s=None):
super(sgf2nfloat, self).__init__()
if p is None and type(val) == sgf2n:
bits = val.bit_decompose(self.vlen + self.plen + 1)
self.v = self.v_type.compose(bits[:self.vlen])
self.p = self.p_type.compose(bits[self.vlen:-1])
self.s = bits[-1]
self.z = util.tree_reduce(operator.mul, (1 - b for b in self.v.bits))
else:
if p is None:
v, p, z, s = sfloat.convert_float(val, self.vlen, self.plen)
# correct sfloat
p += self.vlen - 1
v_bits = util.bit_decompose(v, self.vlen)
p_bits = util.bit_decompose(p, self.plen)
self.v = self.v_type.compose(v_bits)
self.p = self.p_type.compose(p_bits)
self.z = z
self.s = s
else:
self.v, self.p, self.z, self.s = val, p, z, s
v_bits = val.bit_decompose()[:self.vlen]
p_bits = p.bit_decompose()[:self.plen]
gmovs(self, util.bit_compose(v_bits + p_bits + [self.s]))
def add(self, other):
a = self.p < other.p
b = self.p == other.p
c = self.v < other.v
other_dominates = (b.if_else(c, a))
pmax, pmin = a.cond_swap(self.p, other.p, self.p_type)
vmax, vmin = other_dominates.cond_swap(self.v, other.v, self.v_type)
s3 = self.s ^ other.s
pdiff = self.pdiff_type(pmax - pmin)
d = self.vlen < pdiff
pow_delta = util.pow2(d.if_else(0, pdiff).bit_decompose(util.log2(self.vlen)))
v3 = vmax
v4 = self.v_type(sgf2n(vmax) * pow_delta) + self.v_type(s3.if_else(-vmin, vmin))
v = self.v_type(sgf2n(d.if_else(v3, v4) << self.vlen) / pow_delta)
v >>= self.vlen - 1
h = floatingpoint.PreOR(v.bits[self.vlen+1::-1])
tmp = sum(util.if_else(b, 0, 1 << i) for i,b in enumerate(h))
pow_p0 = 1 + self.v_type(tmp)
v = (v * pow_p0) >> 2
p = pmax - sum(self.p_type.compose([1 - b]) for b in h) + 1
v = self.z.if_else(other.v, other.z.if_else(self.v, v))
z = v == 0
p = z.if_else(0, self.z.if_else(other.p, other.z.if_else(self.p, p)))
s = other_dominates.if_else(other.s, self.s)
s = self.z.if_else(other.s, other.z.if_else(self.s, s))
return sgf2nfloat(v, p, z, s)
def mul(self, other):
v = (self.v * other.v) >> (self.vlen - 1)
b = v.bits[self.vlen]
v = b.if_else(v >> 1, v)
p = self.p + other.p + self.p_type.compose([b])
s = self.s + other.s
z = util.or_op(self.z, other.z)
return sgf2nfloat(v, p, z, s)
sgf2nfloat.set_precision(24, 8)
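# The defaults above (24-bit significand, 8-bit exponent) mirror the field
# widths of IEEE binary32 single precision.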
def parse_type(other, k=None, f=None):
# converts type to cfix/sfix depending on the case
if isinstance(other, cfix.scalars):
return cfix(other, k=k, f=f)
elif isinstance(other, cint):
tmp = cfix()
tmp.load_int(other)
return tmp
elif isinstance(other, sint):
tmp = sfix()
tmp.load_int(other)
return tmp
elif isinstance(other, sfloat):
tmp = sfix(other)
return tmp
else:
return other
class cfix(_number, _structure):
""" Clear fixed point type. """
__slots__ = ['value', 'f', 'k', 'size']
reg_type = 'c'
scalars = (int, float, regint)
@classmethod
def set_precision(cls, f, k = None):
# k is the whole bitlength of fixed point
# f is the bitlength of decimal part
cls.f = f
if k is None:
cls.k = 2 * f
else:
cls.k = k
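    # Example (sketch): after cfix.set_precision(20, 40), cfix(1.5) stores
    # round(1.5 * 2**20) in the underlying cint.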
@vectorized_classmethod
def load_mem(cls, address, mem_type=None):
res = []
res.append(cint.load_mem(address))
return cfix(*res)
@vectorized_classmethod
def read_from_socket(cls, client_id, n=1):
""" Read one or more cfix values from a socket.
Sender will have already bit shifted and sent as cints."""
        cint_inputs = cint.read_from_socket(client_id, n)
        if n == 1:
            return cfix(cint_inputs)
        else:
            return list(map(cfix, cint_inputs))
@vectorize
def write_to_socket(self, client_id, message_type=ClientMessageType.NoType):
""" Send cfix to socket. Value is sent as bit shifted cint. """
writesocketc(client_id, message_type, cint(self.v))
    @vectorized_classmethod
    def write_to_socket(cls, client_id, values, message_type=ClientMessageType.NoType):
""" Send a list of cfix values to socket. Values are sent as bit shifted cints. """
def cfix_to_cint(fix_val):
return cint(fix_val.v)
cint_values = list(map(cfix_to_cint, values))
writesocketc(client_id, message_type, *cint_values)
@staticmethod
def malloc(size):
return program.malloc(size, cint)
@staticmethod
def n_elements():
return 1
@vectorize_init
def __init__(self, v=None, k=None, f=None, size=None):
f = f or self.f
k = k or self.k
self.f = f
self.k = k
self.size = get_global_vector_size()
if isinstance(v, cint):
self.v = cint(v,size=self.size)
elif isinstance(v, cfix.scalars):
v = v * (2 ** f)
try:
v = int(round(v))
except TypeError:
pass
self.v = cint(v, size=self.size)
elif isinstance(v, cfix):
self.v = v.v
elif isinstance(v, MemValue):
self.v = v
elif v is None:
self.v = cint(0)
else:
raise CompilerError('cannot initialize cfix with %s' % v)
@vectorize
def load_int(self, v):
self.v = cint(v) * (2 ** self.f)
@classmethod
def conv(cls, other):
if isinstance(other, cls):
return other
else:
try:
res = cfix()
res.load_int(other)
return res
except (TypeError, CompilerError):
pass
return cls(other)
def store_in_mem(self, address):
self.v.store_in_mem(address)
def sizeof(self):
return self.size * 4
@vectorize
def add(self, other):
other = parse_type(other)
if isinstance(other, cfix):
return cfix(self.v + other.v)
else:
return NotImplemented
@vectorize
def mul(self, other):
other = parse_type(other)
if isinstance(other, cfix):
assert self.f == other.f
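            # >> is only meaningful on non-negative values here, so strip
            # the sign, shift the absolute product down by f fractional
            # bits, and reapply the sign.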
sgn = cint(1 - 2 * (self.v * other.v < 0))
absolute = self.v * other.v * sgn
val = sgn * (absolute >> self.f)
return cfix(val)
elif isinstance(other, sfix):
return NotImplemented
else:
raise CompilerError('Invalid type %s for cfix.__mul__' % type(other))
@vectorize
def __sub__(self, other):
other = parse_type(other)
if isinstance(other, cfix):
return cfix(self.v - other.v)
elif isinstance(other, sfix):
return sfix(self.v - other.v)
else:
raise NotImplementedError
@vectorize
def __neg__(self):
# cfix type always has .v
return cfix(-self.v)
def __rsub__(self, other):
return -self + other
@vectorize
def __eq__(self, other):
other = parse_type(other)
if isinstance(other, cfix):
return self.v == other.v
elif isinstance(other, sfix):
return other.v.equal(self.v, self.k, other.kappa)
else:
raise NotImplementedError
@vectorize
def __lt__(self, other):
other = parse_type(other)
if isinstance(other, cfix):
return self.v < other.v
elif isinstance(other, sfix):
            if self.k != other.k or self.f != other.f:
raise TypeError('Incompatible fixed point types in comparison')
return other.v.greater_than(self.v, self.k, other.kappa)
else:
raise NotImplementedError
@vectorize
def __le__(self, other):
other = parse_type(other)
if isinstance(other, cfix):
return self.v <= other.v
elif isinstance(other, sfix):
return other.v.greater_equal(self.v, self.k, other.kappa)
else:
raise NotImplementedError
@vectorize
def __gt__(self, other):
other = parse_type(other)
if isinstance(other, cfix):
return self.v > other.v
elif isinstance(other, sfix):
return other.v.less_than(self.v, self.k, other.kappa)
else:
raise NotImplementedError
@vectorize
def __ge__(self, other):
other = parse_type(other)
if isinstance(other, cfix):
return self.v >= other.v
elif isinstance(other, sfix):
return other.v.less_equal(self.v, self.k, other.kappa)
else:
raise NotImplementedError
@vectorize
def __ne__(self, other):
other = parse_type(other)
if isinstance(other, cfix):
return self.v != other.v
elif isinstance(other, sfix):
return other.v.not_equal(self.v, self.k, other.kappa)
else:
raise NotImplementedError
@vectorize
def __truediv__(self, other):
other = parse_type(other)
if isinstance(other, cfix):
return cfix(library.cint_cint_division(self.v, other.v, self.k, self.f))
elif isinstance(other, sfix):
return sfix(library.FPDiv(self.v, other.v, self.k, self.f,
other.kappa, nearest=sfix.round_nearest))
else:
raise TypeError('Incompatible fixed point types in division')
def print_plain(self):
if self.k > 64:
raise CompilerError('Printing of fixed-point numbers not ' +
'implemented for more than 64-bit precision')
tmp = regint()
convmodp(tmp, self.v, bitlength=self.k)
sign = cint(tmp < 0)
abs_v = sign.if_else(-self.v, self.v)
print_float_plain(cint(abs_v), cint(-self.f), \
cint(0), cint(sign))
class _single(_number, _structure):
""" Representation as single integer preserving the order """
""" E.g. fixed-point numbers """
__slots__ = ['v']
kappa = 40
round_nearest = False
@property
@classmethod
def reg_type(cls):
return cls.int_type.reg_type
@classmethod
def receive_from_client(cls, n, client_id, message_type=ClientMessageType.NoType):
""" Securely obtain shares of n values input by a client.
Assumes client has already run bit shift to convert fixed point to integer."""
sint_inputs = cls.int_type.receive_from_client(n, client_id, ClientMessageType.TripleShares)
return list(map(cls, sint_inputs))
@vectorized_classmethod
def load_mem(cls, address, mem_type=None):
return cls._new(cls.int_type.load_mem(address))
@classmethod
@read_mem_value
def conv(cls, other):
if isinstance(other, cls):
return other
else:
try:
return cls.from_sint(other)
except (TypeError, CompilerError):
pass
return cls(other)
@classmethod
def coerce(cls, other):
return cls.conv(other)
@classmethod
def malloc(cls, size):
return program.malloc(size, cls.int_type)
@staticmethod
def n_elements():
return 1
@classmethod
def dot_product(cls, x, y, res_params=None):
return cls.unreduced_dot_product(x, y, res_params).reduce_after_mul()
@classmethod
def unreduced_dot_product(cls, x, y, res_params=None):
dp = cls.int_type.dot_product([xx.pre_mul() for xx in x],
[yy.pre_mul() for yy in y])
return x[0].unreduced(dp, y[0], res_params, len(x))
@classmethod
def row_matrix_mul(cls, row, matrix, res_params=None):
int_matrix = [y.get_vector().pre_mul() for y in matrix]
col = cls.int_type.row_matrix_mul([x.pre_mul() for x in row],
int_matrix)
res = row[0].unreduced(col, matrix[0][0], res_params,
len(row)).reduce_after_mul()
return res
@classmethod
def matrix_mul(cls, A, B, n, res_params=None):
AA = A.pre_mul()
BB = B.pre_mul()
CC = cls.int_type.matrix_mul(AA, BB, n)
res = A.unreduced(CC, B, res_params, n).reduce_after_mul()
return res
def store_in_mem(self, address):
self.v.store_in_mem(address)
@property
def size(self):
return self.v.size
def sizeof(self):
return self.size
def __len__(self):
return len(self.v)
@vectorize
def __sub__(self, other):
other = self.coerce(other)
return self + (-other)
def __rsub__(self, other):
return -self + other
@vectorize
def __eq__(self, other):
other = self.coerce(other)
if isinstance(other, (cfix, _single)):
return self.v.equal(other.v, self.k, self.kappa)
else:
raise NotImplementedError
@vectorize
def __le__(self, other):
other = self.coerce(other)
if isinstance(other, (cfix, _single)):
return self.v.less_equal(other.v, self.k, self.kappa)
else:
raise NotImplementedError
@vectorize
def __lt__(self, other):
other = self.coerce(other)
if isinstance(other, (cfix, _single)):
return self.v.less_than(other.v, self.k, self.kappa)
else:
raise NotImplementedError
@vectorize
def __ge__(self, other):
other = self.coerce(other)
if isinstance(other, (cfix, _single)):
return self.v.greater_equal(other.v, self.k, self.kappa)
else:
raise NotImplementedError
@vectorize
def __gt__(self, other):
other = self.coerce(other)
if isinstance(other, (cfix, _single)):
return self.v.greater_than(other.v, self.k, self.kappa)
else:
raise NotImplementedError
@vectorize
def __ne__(self, other):
other = self.coerce(other)
if isinstance(other, (cfix, _single)):
return self.v.not_equal(other.v, self.k, self.kappa)
else:
raise NotImplementedError
class _fix(_single):
""" Shared fixed point type. """
__slots__ = ['v', 'f', 'k', 'size']
@classmethod
def set_precision(cls, f, k = None):
cls.f = f
# default bitlength = 2*precision
if k is None:
cls.k = 2 * f
else:
if k < f:
raise CompilerError('bit length cannot be less than precision')
cls.k = k
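    # Usage sketch: sfix.set_precision(16, 32) selects 16 fractional bits
    # within a 32-bit total length for subsequently created values.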
@classmethod
def coerce(cls, other):
if isinstance(other, (_fix, cls.clear_type)):
return other
else:
return cls.conv(other)
@classmethod
def from_sint(cls, other, k=None, f=None):
res = cls()
res.f = f or cls.f
res.k = k or cls.k
res.load_int(cls.int_type.conv(other))
return res
@classmethod
def _new(cls, other, k=None, f=None):
res = cls(other)
res.k = k or cls.k
res.f = f or cls.f
return res
@vectorize_init
def __init__(self, _v=None, size=None):
self.size = get_global_vector_size()
f = self.f
k = self.k
        # Warning: do not initialize an sfix from a sint directly; that path
        # is only used in internal methods. For external initialization use
        # load_int.
if _v is None:
self.v = self.int_type(0)
elif isinstance(_v, self.int_type):
self.v = _v
self.size = _v.size
elif isinstance(_v, cfix.scalars):
self.v = self.int_type(int(round(_v * (2 ** f))), size=self.size)
elif isinstance(_v, self.float_type):
p = (f + _v.p)
b = (p.greater_equal(0, _v.vlen))
a = b*(_v.v << (p)) + (1-b)*(_v.v >> (-p))
self.v = (1-2*_v.s)*a
elif isinstance(_v, type(self)):
self.v = _v.v
elif isinstance(_v, (MemValue, MemFix)):
            # this is a MemValue (or MemFix) object
self.v = type(self)(_v.read()).v
else:
raise CompilerError('cannot convert %s to sfix' % _v)
if not isinstance(self.v, self.int_type):
raise CompilerError('sfix conversion failure: %s/%s' % (_v, self.v))
@vectorize
def load_int(self, v):
self.v = self.int_type(v) << self.f
def __getitem__(self, index):
return self._new(self.v[index])
@vectorize
def add(self, other):
other = self.coerce(other)
if isinstance(other, (_fix, cfix)):
return self._new(self.v + other.v, k=self.k, f=self.f)
elif isinstance(other, cfix.scalars):
tmp = cfix(other, k=self.k, f=self.f)
return self + tmp
else:
return NotImplemented
@vectorize
def mul(self, other):
if isinstance(other, (sint, cint, regint, int)):
return self._new(self.v * other, k=self.k, f=self.f)
elif isinstance(other, float):
if int(other) == other:
return self.mul(int(other))
v = int(round(other * 2 ** self.f))
if v == 0:
return 0
f = self.f
while v % 2 == 0:
f -= 1
v //= 2
k = len(bin(abs(v))) - 1
other = self.multipliable(v, k, f)
other = self.coerce(other)
if isinstance(other, (_fix, self.clear_type)):
val = self.v.TruncMul(other.v, self.k + other.k, other.f,
self.kappa,
self.round_nearest)
if self.size >= other.size:
return self._new(val, k=self.k, f=self.f)
else:
return self.vec._new(val, k=self.k, f=self.f)
elif isinstance(other, cfix.scalars):
scalar_fix = cfix(other)
return self * scalar_fix
else:
return NotImplemented
@vectorize
def __neg__(self):
return type(self)(-self.v)
@vectorize
def __truediv__(self, other):
other = self.coerce(other)
if isinstance(other, _fix):
return type(self)(library.FPDiv(self.v, other.v, self.k, self.f,
self.kappa,
nearest=self.round_nearest))
elif isinstance(other, cfix):
return type(self)(library.sint_cint_division(self.v, other.v, self.k, self.f, self.kappa))
else:
raise TypeError('Incompatible fixed point types in division')
@vectorize
def __rtruediv__(self, other):
return self.coerce(other) / self
@vectorize
def compute_reciprocal(self):
return type(self)(library.FPDiv(cint(2) ** self.f, self.v, self.k, self.f, self.kappa, True))
def reveal(self):
val = self.v.reveal()
res = self.clear_type(val)
res.f = self.f
res.k = self.k
return res
class sfix(_fix):
int_type = sint
clear_type = cfix
@vectorized_classmethod
def get_input_from(cls, player):
v = cls.int_type()
inputmixed('fix', v, cls.f, player)
return cls._new(v)
@vectorized_classmethod
def get_random(cls, lower, upper):
""" Uniform random number around centre of bounds """
""" Range can be smaller """
log_range = int(math.log(upper - lower, 2))
n_bits = log_range + cls.f
average = lower + 0.5 * (upper - lower)
lower = average - 0.5 * 2 ** log_range
return cls._new(cls.int_type.get_random_int(n_bits)) + lower
def coerce(self, other):
return parse_type(other, k=self.k, f=self.f)
def mul_no_reduce(self, other, res_params=None):
assert self.f == other.f
return self.unreduced(self.v * other.v)
def pre_mul(self):
return self.v
def unreduced(self, v, other=None, res_params=None, n_summands=1):
return unreduced_sfix(v, self.k * 2, self.f, self.kappa)
@staticmethod
def multipliable(v, k, f):
return cfix(cint.conv(v), k, f)
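# unreduced_sfix defers the truncation step of fixed-point multiplication:
# sums of raw products (e.g. dot products) can be accumulated first and
# rounded only once via reduce_after_mul().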
class unreduced_sfix(_single):
int_type = sint
@classmethod
def _new(cls, v):
return cls(v, 2 * sfix.k, sfix.f, sfix.kappa)
def __init__(self, v, k, m, kappa):
self.v = v
self.k = k
self.m = m
self.kappa = kappa
def __add__(self, other):
        if isinstance(other, int) and other == 0:
return self
assert self.k == other.k
assert self.m == other.m
assert self.kappa == other.kappa
return unreduced_sfix(self.v + other.v, self.k, self.m, self.kappa)
__radd__ = __add__
@vectorize
def reduce_after_mul(self):
return sfix(sfix.int_type.round(self.v, self.k, self.m, self.kappa,
nearest=sfix.round_nearest,
signed=True))
sfix.unreduced_type = unreduced_sfix
# Default precision: 20-bit fractional part within a 40-bit total bit length.
# These constants were chosen so that multiplications fit in a 128-bit prime
# field:
# (precision n1) 41 + (precision n2) 41 + (stat_sec) 40 = 82 + 40 = 122 <= 128,
# with statistical security parameter 40.
fixed_lower = 20
fixed_upper = 40
sfix.set_precision(fixed_lower, fixed_upper)
cfix.set_precision(fixed_lower, fixed_upper)
class squant(_single):
""" Quantization as in ArXiv:1712.05877v1 """
__slots__ = ['params']
int_type = sint
clamp = True
@classmethod
def set_params(cls, S, Z=0, k=8):
cls.params = squant_params(S, Z, k)
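    # In the scheme of the paper cited above, a quantized integer q
    # represents the real number S * (q - Z), with scale S, zero point Z,
    # and a k-bit representable range.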
@classmethod
def from_sint(cls, other):
raise CompilerError('sint to squant conversion not implemented')
@classmethod
def _new(cls, value, params=None):
res = cls(params=params)
res.v = value
return res
@read_mem_value
def __init__(self, value=None, params=None):
if params is not None:
self.params = params
if value is None:
# need to set v manually
pass
elif isinstance(value, cfix.scalars):
set_global_vector_size(1)
q = util.round_to_int(value / self.S + self.Z)
if util.is_constant(q) and (q < 0 or q >= 2**self.k):
raise CompilerError('%f not quantizable' % value)
self.v = self.int_type(q)
reset_global_vector_size()
elif isinstance(value, squant) and value.params == self.params:
self.v = value.v
else:
raise CompilerError('cannot convert %s to squant' % value)
def __getitem__(self, index):
return type(self)._new(self.v[index], self.params)
def get_params(self):
return self.params
@property
def S(self):
return self.params.S
@property
def Z(self):
return self.params.Z
@property
def k(self):
return self.params.k
def coerce(self, other):
other = self.conv(other)
return self._new(util.expand(other.v, self.size), other.params)
@vectorize
def add(self, other):
other = self.coerce(other)
assert self.get_params() == other.get_params()
return self._new(self.v + other.v - util.expand(self.Z, self.v.size))
def mul(self, other, res_params=None):
return self.mul_no_reduce(other, res_params).reduce_after_mul()
def mul_no_reduce(self, other, res_params=None):
if isinstance(other, (sint, cint, regint)):
return self._new(other * (self.v - self.Z) + self.Z,
params=self.get_params())
other = self.coerce(other)
tmp = (self.v - self.Z) * (other.v - other.Z)
return _unreduced_squant(tmp, (self.get_params(), other.get_params()),
res_params=res_params)
def pre_mul(self):
return self.v - util.expand(self.Z, self.v.size)
def unreduced(self, v, other, res_params=None, n_summands=1):
return _unreduced_squant(v, (self.get_params(), other.get_params()),
res_params, n_summands)
@vectorize
def for_mux(self, other):
other = self.coerce(other)
assert self.params == other.params
f = lambda x: self._new(x, self.params)
return f, self.v, other.v
@vectorize
def __neg__(self):
return self._new(-self.v + 2 * util.expand(self.Z, self.v.size))
class _unreduced_squant(object):
def __init__(self, v, params, res_params=None, n_summands=1):
self.v = v
self.params = params
self.n_summands = n_summands
self.res_params = res_params or params[0]
def __add__(self, other):
        if isinstance(other, int) and other == 0:
return self
assert self.params == other.params
assert self.res_params == other.res_params
return _unreduced_squant(self.v + other.v, self.params, self.res_params,
self.n_summands + other.n_summands)
__radd__ = __add__
def reduce_after_mul(self):
return squant_params.conv(self.res_params).reduce(self)
class squant_params(object):
max_n_summands = 2048
@staticmethod
def conv(other):
if isinstance(other, squant_params):
return other
else:
return squant_params(*other)
def __init__(self, S, Z=0, k=8):
try:
self.S = float(S)
        except (TypeError, ValueError):
self.S = S
self.Z = MemValue.if_necessary(Z)
self.k = k
self._store = {}
if program.options.ring:
# cheaper probabilistic truncation
self.max_length = int(program.options.ring) - 1
else:
# safe choice for secret shift
self.max_length = 71
def __iter__(self):
yield self.S
yield self.Z
yield self.k
def is_constant(self):
return util.is_constant_float(self.S) and util.is_constant(self.Z)
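    # Requantization as in the quantization literature: the real multiplier
    # M = S1 * S2 / S_out is approximated by an integer multiplication
    # followed by a right shift of n_shift bits.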
def get(self, input_params, n_summands):
p = input_params
M = p[0].S * p[1].S / self.S
logM = util.log2(M)
n_shift = self.max_length - p[0].k - p[1].k - util.log2(n_summands)
if util.is_constant_float(M):
n_shift -= logM
int_mult = int(round(M * 2 ** (n_shift)))
else:
int_mult = MemValue(M.v << (n_shift + M.p))
shifted_Z = MemValue.if_necessary(self.Z << n_shift)
return n_shift, int_mult, shifted_Z
def precompute(self, *input_params):
self._store[input_params] = self.get(input_params, self.max_n_summands)
def get_stored(self, unreduced):
assert unreduced.n_summands <= self.max_n_summands
return self._store[unreduced.params]
def reduce(self, unreduced):
ps = (self,) + unreduced.params
if reduce(operator.and_, (p.is_constant() for p in ps)):
n_shift, int_mult, shifted_Z = self.get(unreduced.params,
unreduced.n_summands)
else:
n_shift, int_mult, shifted_Z = self.get_stored(unreduced)
size = unreduced.v.size
n_shift = util.expand(n_shift, size)
shifted_Z = util.expand(shifted_Z, size)
int_mult = util.expand(int_mult, size)
tmp = unreduced.v * int_mult + shifted_Z
shifted = tmp.round(self.max_length, n_shift,
kappa=squant.kappa, nearest=squant.round_nearest,
signed=True)
if squant.clamp:
length = max(self.k, self.max_length - n_shift) + 1
top = (1 << self.k) - 1
over = shifted.greater_than(top, length, squant.kappa)
under = shifted.less_than(0, length, squant.kappa)
shifted = over.if_else(top, shifted)
shifted = under.if_else(0, shifted)
return squant._new(shifted, params=self)
class sfloat(_number, _structure):
""" Shared floating point data type, representing (1 - 2s)*(1 - z)*v*2^p.
v: significand
p: exponent
z: zero flag
s: sign bit
"""
__slots__ = ['v', 'p', 'z', 's', 'size']
# single precision
vlen = 24
plen = 8
kappa = 40
round_nearest = False
@staticmethod
def n_elements():
return 4
@classmethod
def malloc(cls, size):
return program.malloc(size * cls.n_elements(), sint)
@classmethod
def is_address_tuple(cls, address):
if isinstance(address, (list, tuple)):
assert(len(address) == cls.n_elements())
return True
return False
@vectorized_classmethod
def load_mem(cls, address, mem_type=None):
size = get_global_vector_size()
if cls.is_address_tuple(address):
return sfloat(*(sint.load_mem(a, size=size) for a in address))
res = []
for i in range(4):
res.append(sint.load_mem(address + i * size, size=size))
return sfloat(*res)
@classmethod
def set_error(cls, error):
        # accumulating as cls.error += error - cls.error * error
        # would be incompatible with loops
        cls.error = error
@classmethod
def conv(cls, other):
if isinstance(other, cls):
return other
else:
return cls(other)
@classmethod
def coerce(cls, other):
return cls.conv(other)
@staticmethod
def convert_float(v, vlen, plen):
if v < 0:
s = 1
else:
s = 0
if v == 0:
v = 0
p = 0
z = 1
else:
p = int(math.floor(math.log(abs(v), 2))) - vlen + 1
vv = v
v = int(round(abs(v) * 2 ** (-p)))
if v == 2 ** vlen:
p += 1
v //= 2
z = 0
if p < -2 ** (plen - 1):
print('Warning: %e truncated to zero' % vv)
v, p, z = 0, 0, 1
if p >= 2 ** (plen - 1):
raise CompilerError('Cannot convert %s to float ' \
'with %d exponent bits' % (vv, plen))
return v, p, z, s
@vectorized_classmethod
def get_input_from(cls, player):
v = sint()
p = sint()
z = sint()
s = sint()
inputmixed('float', v, p, z, s, cls.vlen, player)
return cls(v, p, z, s)
@vectorize_init
@read_mem_value
def __init__(self, v, p=None, z=None, s=None, size=None):
self.size = get_global_vector_size()
if p is None:
if isinstance(v, sfloat):
p = v.p
z = v.z
s = v.s
v = v.v
elif isinstance(v, sfix):
f = v.f
v, p, z, s = floatingpoint.Int2FL(v.v, v.k,
self.vlen, self.kappa)
p = p - f
elif util.is_constant_float(v):
v, p, z, s = self.convert_float(v, self.vlen, self.plen)
else:
v, p, z, s = floatingpoint.Int2FL(sint.conv(v),
program.bit_length,
self.vlen, self.kappa)
if isinstance(v, int):
if not ((v >= 2**(self.vlen-1) and v < 2**(self.vlen)) or v == 0):
raise CompilerError('Floating point number malformed: significand')
self.v = library.load_int_to_secret(v)
else:
self.v = v
if isinstance(p, int):
if not (p >= -2**(self.plen - 1) and p < 2**(self.plen - 1)):
                raise CompilerError('Floating point number malformed: exponent %d not signed %d-bit integer' % (p, self.plen))
self.p = library.load_int_to_secret(p)
else:
self.p = p
if isinstance(z, int):
if not (z == 0 or z == 1):
raise CompilerError('Floating point number malformed: zero bit')
self.z = sint()
ldsi(self.z, z)
else:
self.z = z
if isinstance(s, int):
if not (s == 0 or s == 1):
raise CompilerError('Floating point number malformed: sign')
self.s = sint()
ldsi(self.s, s)
else:
self.s = s
def __getitem__(self, index):
return sfloat(*(x[index] for x in self))
def __iter__(self):
yield self.v
yield self.p
yield self.z
yield self.s
def store_in_mem(self, address):
if self.is_address_tuple(address):
for a, x in zip(address, self):
x.store_in_mem(a)
return
for i,x in enumerate((self.v, self.p, self.z, self.s)):
x.store_in_mem(address + i * self.size)
def sizeof(self):
return self.size * self.n_elements()
@vectorize
def add(self, other):
other = self.conv(other)
if isinstance(other, sfloat):
a,c,d,e = [sint() for i in range(4)]
t = sint()
t2 = sint()
v1 = self.v
v2 = other.v
p1 = self.p
p2 = other.p
s1 = self.s
s2 = other.s
z1 = self.z
z2 = other.z
a = p1.less_than(p2, self.plen, self.kappa)
b = floatingpoint.EQZ(p1 - p2, self.plen, self.kappa)
c = v1.less_than(v2, self.vlen, self.kappa)
ap1 = a*p1
ap2 = a*p2
aneg = 1 - a
bneg = 1 - b
cneg = 1 - c
av1 = a*v1
av2 = a*v2
cv1 = c*v1
cv2 = c*v2
pmax = ap2 + p1 - ap1
pmin = p2 - ap2 + ap1
vmax = bneg*(av2 + v1 - av1) + b*(cv2 + v1 - cv1)
vmin = bneg*(av1 + v2 - av2) + b*(cv1 + v2 - cv2)
s3 = s1 + s2 - 2 * s1 * s2
comparison.LTZ(d, self.vlen + pmin - pmax + sfloat.round_nearest,
self.plen, self.kappa)
pow_delta = floatingpoint.Pow2((1 - d) * (pmax - pmin),
self.vlen + 1 + sfloat.round_nearest,
self.kappa)
# deviate from paper for more precision
#v3 = 2 * (vmax - s3) + 1
v3 = vmax
v4 = vmax * pow_delta + (1 - 2 * s3) * vmin
to_trunc = (d * v3 + (1 - d) * v4)
if program.options.ring:
to_trunc <<= 1 + sfloat.round_nearest
v = floatingpoint.TruncInRing(to_trunc,
2 * (self.vlen + 1 +
sfloat.round_nearest),
pow_delta)
else:
to_trunc *= two_power(self.vlen + sfloat.round_nearest)
v = to_trunc * floatingpoint.Inv(pow_delta)
comparison.Trunc(t, v, 2 * self.vlen + 1 + sfloat.round_nearest,
self.vlen - 1, self.kappa, False)
v = t
u = floatingpoint.BitDec(v, self.vlen + 2 + sfloat.round_nearest,
self.vlen + 2 + sfloat.round_nearest, self.kappa,
list(range(1 + sfloat.round_nearest,
self.vlen + 2 + sfloat.round_nearest)))
# using u[0] doesn't seem necessary
h = floatingpoint.PreOR(u[:sfloat.round_nearest:-1], self.kappa)
p0 = self.vlen + 1 - sum(h)
pow_p0 = 1 + sum([two_power(i) * (1 - h[i]) for i in range(len(h))])
if self.round_nearest:
t2, overflow = \
floatingpoint.TruncRoundNearestAdjustOverflow(pow_p0 * v,
self.vlen + 3,
self.vlen,
self.kappa)
p0 = p0 - overflow
else:
comparison.Trunc(t2, pow_p0 * v, self.vlen + 2, 2, self.kappa, False)
v = t2
# deviate for more precision
#p = pmax - p0 + 1 - d
p = pmax - p0 + 1
zz = self.z*other.z
zprod = 1 - self.z - other.z + zz
v = zprod*t2 + self.z*v2 + other.z*v1
z = floatingpoint.EQZ(v, self.vlen, self.kappa)
p = (zprod*p + self.z*p2 + other.z*p1)*(1 - z)
s = (1 - b)*(a*other.s + aneg*self.s) + b*(c*other.s + cneg*self.s)
s = zprod*s + (other.z - zz)*self.s + (self.z - zz)*other.s
return sfloat(v, p, z, s)
else:
return NotImplemented
@vectorize_max
def mul(self, other):
other = self.conv(other)
if isinstance(other, sfloat):
v1 = sint()
v2 = sint()
b = sint()
c2expl = cint()
comparison.ld2i(c2expl, self.vlen)
if sfloat.round_nearest:
v1 = comparison.TruncRoundNearest(self.v*other.v, 2*self.vlen,
self.vlen-1, self.kappa)
else:
comparison.Trunc(v1, self.v*other.v, 2*self.vlen, self.vlen-1, self.kappa, False)
t = v1 - c2expl
comparison.LTZ(b, t, self.vlen+1, self.kappa)
comparison.Trunc(v2, b*v1 + v1, self.vlen+1, 1, self.kappa, False)
z1, z2, s1, s2, p1, p2 = (x.expand_to_vector() for x in \
(self.z, other.z, self.s, other.s,
self.p, other.p))
z = z1 + z2 - self.z*other.z # = OR(z1, z2)
s = s1 + s2 - self.s*other.s*2 # = XOR(s1,s2)
p = (p1 + p2 - b + self.vlen)*(1 - z)
return sfloat(v2, p, z, s)
else:
return NotImplemented
def __sub__(self, other):
return self + -other
def __rsub__(self, other):
return -self + other
def __truediv__(self, other):
other = self.conv(other)
v = floatingpoint.SDiv(self.v, other.v + other.z * (2**self.vlen - 1),
self.vlen, self.kappa, self.round_nearest)
b = v.less_than(two_power(self.vlen-1), self.vlen + 1, self.kappa)
overflow = v.greater_equal(two_power(self.vlen), self.vlen + 1, self.kappa)
underflow = v.less_than(two_power(self.vlen-2), self.vlen + 1, self.kappa)
v = (v + b * v) * (1 - overflow) * (1 - underflow) + \
overflow * (2**self.vlen - 1) + \
underflow * (2**(self.vlen-1)) * (1 - self.z)
p = (1 - self.z) * (self.p - other.p - self.vlen - b + 1)
z = self.z
s = self.s + other.s - 2 * self.s * other.s
sfloat.set_error(other.z)
return sfloat(v, p, z, s)
def __rtruediv__(self, other):
return self.conv(other) / self
@vectorize
def __neg__(self):
return sfloat(self.v, self.p, self.z, (1 - self.s) * (1 - self.z))
@vectorize
def __lt__(self, other):
other = self.conv(other)
if isinstance(other, sfloat):
z1 = self.z
z2 = other.z
s1 = self.s
s2 = other.s
a = self.p.less_than(other.p, self.plen, self.kappa)
c = floatingpoint.EQZ(self.p - other.p, self.plen, self.kappa)
d = ((1 - 2*self.s)*self.v).less_than((1 - 2*other.s)*other.v, self.vlen + 1, self.kappa)
cd = c*d
ca = c*a
b1 = cd + a - ca
b2 = cd + 1 + ca - c - a
s12 = self.s*other.s
z12 = self.z*other.z
b = (z1 - z12)*(1 - s2) + (z2 - z12)*s1 + (1 + z12 - z1 - z2)*(s1 - s12 + (1 + s12 - s1 - s2)*b1 + s12*b2)
return b
else:
return NotImplemented
def __ge__(self, other):
return 1 - (self < other)
def __gt__(self, other):
return self.conv(other) < self
def __le__(self, other):
return self.conv(other) >= self
@vectorize
def __eq__(self, other):
other = self.conv(other)
# the sign can be both ways for zeroes
both_zero = self.z * other.z
return floatingpoint.EQZ(self.v - other.v, self.vlen, self.kappa) * \
floatingpoint.EQZ(self.p - other.p, self.plen, self.kappa) * \
(1 - self.s - other.s + 2 * self.s * other.s) * \
(1 - both_zero) + both_zero
def __ne__(self, other):
return 1 - (self == other)
def log2(self):
up = self.v.greater_than(1 << (self.vlen - 1), self.vlen, self.kappa)
return self.p + self.vlen - 1 + up
def round_to_int(self):
direction = self.p.greater_equal(-self.vlen, self.plen, self.kappa)
right = self.v.right_shift(-self.p - 1, self.vlen + 1, self.kappa)
up = right.mod2m(1, self.vlen + 1, self.kappa)
right = right.right_shift(1, self.vlen + 1, self.kappa) + up
abs_value = direction * right
return self.s.if_else(-abs_value, abs_value)
def value(self):
""" Gets actual floating point value, if emulation is enabled. """
return (1 - 2*self.s.value)*(1 - self.z.value)*self.v.value/float(2**self.p.value)
def reveal(self):
return cfloat(self.v.reveal(), self.p.reveal(), self.z.reveal(), self.s.reveal())
class cfloat(object):
# Helper class used for printing sfloats
__slots__ = ['v', 'p', 'z', 's']
def __init__(self, v, p, z, s):
self.v, self.p, self.z, self.s = [cint.conv(x) for x in (v, p, z, s)]
def print_float_plain(self):
print_float_plain(self.v, self.p, self.z, self.s)
sfix.float_type = sfloat
_types = {
'c': cint,
's': sint,
'sg': sgf2n,
'cg': cgf2n,
'ci': regint,
}
def _get_type(t):
if t in _types:
return _types[t]
else:
return t
class Array(object):
@classmethod
def create_from(cls, l):
if isinstance(l, cls):
return l
tmp = list(l)
res = cls(len(tmp), type(tmp[0]))
res.assign(tmp)
return res
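    # Usage sketch: a = Array(10, sint); a.assign_all(0); a[3] = sint(5);
    # a.get_vector() then loads all elements as a single size-10 vector.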
def __init__(self, length, value_type, address=None, debug=None):
value_type = _get_type(value_type)
self.address = address
self.length = length
self.value_type = value_type
if address is None:
self.address = self._malloc()
self.address_cache = {}
self.debug = debug
def _malloc(self):
return self.value_type.malloc(self.length)
def delete(self):
if program:
program.free(self.address, self.value_type.reg_type)
def get_address(self, index):
key = str(index)
if isinstance(index, int) and self.length is not None:
index += self.length * (index < 0)
if index >= self.length or index < 0:
raise IndexError('index %s, length %s' % \
(str(index), str(self.length)))
if (program.curr_block, key) not in self.address_cache:
n = self.value_type.n_elements()
length = self.length
if n == 1:
# length can be None for single-element arrays
length = 0
self.address_cache[program.curr_block, key] = \
util.untuplify([self.address + index + i * length \
for i in range(n)])
if self.debug:
library.print_ln_if(index >= self.length, 'OF:' + self.debug)
library.print_ln_if(self.address_cache[program.curr_block, key] >= program.allocated_mem[self.value_type.reg_type], 'AOF:' + self.debug)
return self.address_cache[program.curr_block, key]
def get_slice(self, index):
if index.stop is None and self.length is None:
raise CompilerError('Cannot slice array of unknown length')
return index.start or 0, index.stop or self.length, index.step or 1
def __getitem__(self, index):
if isinstance(index, slice):
start, stop, step = self.get_slice(index)
res_length = (stop - start - 1) // step + 1
res = Array(res_length, self.value_type)
@library.for_range(res_length)
def f(i):
res[i] = self[start+i*step]
return res
return self._load(self.get_address(index))
def __setitem__(self, index, value):
if isinstance(index, slice):
start, stop, step = self.get_slice(index)
value = Array.create_from(value)
source_index = MemValue(0)
@library.for_range(start, stop, step)
def f(i):
self[i] = value[source_index]
source_index.iadd(1)
return
self._store(value, self.get_address(index))
# the following two are useful for compile-time lengths
# and thus differ from the usual Python syntax
def get_range(self, start, size):
return [self[start + i] for i in range(size)]
def set_range(self, start, values):
for i, value in enumerate(values):
self[start + i] = value
def _load(self, address):
return self.value_type.load_mem(address)
def _store(self, value, address):
self.value_type.conv(value).store_in_mem(address)
def __len__(self):
return self.length
def __iter__(self):
for i in range(self.length):
yield self[i]
def same_shape(self):
return Array(self.length, self.value_type)
def assign(self, other, base=0):
try:
other = other.get_vector()
        except AttributeError:
pass
try:
other.store_in_mem(self.get_address(base))
assert len(self) >= other.size + base
except AttributeError:
for i,j in enumerate(other):
self[i] = j
return self
def assign_all(self, value, use_threads=True, conv=True):
if conv:
value = self.value_type.conv(value)
mem_value = MemValue(value)
n_threads = 8 if use_threads and len(self) > 2**20 else 1
@library.for_range_multithread(n_threads, 1024, len(self))
def f(i):
self[i] = mem_value
return self
def get_vector(self, base=0, size=None):
size = size or self.length
return self.value_type.load_mem(self.get_address(base), size=size)
def get_mem_value(self, index):
return MemValue(self[index], self.get_address(index))
def input_from(self, player, budget=None):
self.assign(self.value_type.get_input_from(player, size=len(self)))
def __add__(self, other):
        if isinstance(other, int) and other == 0:
return self
assert len(self) == len(other)
return self.get_vector() + other
def __sub__(self, other):
assert len(self) == len(other)
return self.get_vector() - other
def __mul__(self, value):
return self.get_vector() * value
def __pow__(self, value):
return self.get_vector() ** value
__radd__ = __add__
__rmul__ = __mul__
def shuffle(self):
@library.for_range(len(self))
def _(i):
j = regint.get_random(64) % (len(self) - i)
tmp = self[i]
self[i] = self[i + j]
self[i + j] = tmp
def reveal(self):
return Array.create_from(x.reveal() for x in self)
sint.dynamic_array = Array
sgf2n.dynamic_array = Array
class SubMultiArray(object):
def __init__(self, sizes, value_type, address, index, debug=None):
self.sizes = sizes
self.value_type = _get_type(value_type)
self.address = address + index * self.total_size()
self.sub_cache = {}
self.debug = debug
if debug:
library.print_ln_if(self.address + reduce(operator.mul, self.sizes) * self.value_type.n_elements() > program.allocated_mem[self.value_type.reg_type], 'AOF%d:' % len(self.sizes) + self.debug)
def __getitem__(self, index):
if util.is_constant(index) and index >= self.sizes[0]:
raise StopIteration
key = program.curr_block, str(index)
if key not in self.sub_cache:
if self.debug:
library.print_ln_if(index >= self.sizes[0], \
'OF%d:' % len(self.sizes) + self.debug)
if len(self.sizes) == 2:
self.sub_cache[key] = \
Array(self.sizes[1], self.value_type, \
self.address + index * self.sizes[1] *
self.value_type.n_elements(), \
debug=self.debug)
else:
self.sub_cache[key] = \
SubMultiArray(self.sizes[1:], self.value_type, \
self.address, index, debug=self.debug)
return self.sub_cache[key]
def __setitem__(self, index, other):
self[index].assign(other)
def __len__(self):
return self.sizes[0]
def assign_all(self, value):
@library.for_range(self.sizes[0])
def f(i):
self[i].assign_all(value)
return self
def total_size(self):
return reduce(operator.mul, self.sizes) * self.value_type.n_elements()
def get_vector(self, base=0, size=None):
assert self.value_type.n_elements() == 1
size = size or self.total_size()
return self.value_type.load_mem(self.address + base, size=size)
def assign_vector(self, vector, base=0):
assert self.value_type.n_elements() == 1
assert vector.size <= self.total_size()
vector.store_in_mem(self.address + base)
def assign(self, other):
if self.value_type.n_elements() > 1:
assert self.sizes == other.sizes
self.assign_vector(other.get_vector())
def same_shape(self):
return MultiArray(self.sizes, self.value_type)
def input_from(self, player, budget=None):
@library.for_range_opt(self.sizes[0], budget=budget)
def _(i):
self[i].input_from(player, budget=budget)
def schur(self, other):
assert self.sizes == other.sizes
if len(self.sizes) == 2:
res = Matrix(self.sizes[0], self.sizes[1], self.value_type)
else:
res = MultiArray(self.sizes, self.value_type)
res.assign_vector(self.get_vector() * other.get_vector())
return res
def __add__(self, other):
        if isinstance(other, int) and other == 0:
return self
assert self.sizes == other.sizes
if len(self.sizes) == 2:
res = Matrix(self.sizes[0], self.sizes[1], self.value_type)
else:
res = MultiArray(self.sizes, self.value_type)
res.assign_vector(self.get_vector() + other.get_vector())
return res
__radd__ = __add__
def iadd(self, other):
assert self.sizes == other.sizes
self.assign_vector(self.get_vector() + other.get_vector())
def __mul__(self, other):
return self.mul(other)
def mul(self, other, res_params=None):
assert len(self.sizes) == 2
if isinstance(other, Array):
assert len(other) == self.sizes[1]
if self.value_type.n_elements() == 1:
matrix = Matrix(len(other), 1, other.value_type, \
address=other.address)
res = self * matrix
return Array(res.sizes[0], res.value_type, address=res.address)
else:
matrix = Matrix(len(other), 1, other.value_type)
for i, x in enumerate(other):
matrix[i][0] = x
res = self * matrix
return Array.create_from(x[0] for x in res)
elif isinstance(other, SubMultiArray):
assert len(other.sizes) == 2
assert other.sizes[0] == self.sizes[1]
if res_params is not None:
class t(self.value_type):
pass
t.params = res_params
else:
t = self.value_type
res_matrix = Matrix(self.sizes[0], other.sizes[1], t)
try:
if max(res_matrix.sizes) > 1000:
raise AttributeError()
A = self.get_vector()
B = other.get_vector()
res_matrix.assign_vector(
self.value_type.matrix_mul(A, B, self.sizes[1],
res_params))
except (AttributeError, AssertionError):
# fallback for sfloat etc.
@library.for_range_opt(self.sizes[0])
def _(i):
try:
res_matrix[i] = self.value_type.row_matrix_mul(
self[i], other, res_params)
except AttributeError:
# fallback for binary circuits
@library.for_range(other.sizes[1])
def _(j):
res_matrix[i][j] = 0
@library.for_range(self.sizes[1])
def _(k):
res_matrix[i][j] += self[i][k] * other[k][j]
return res_matrix
else:
raise NotImplementedError
def budget_mul(self, other, n_rows, row, n_columns, column, reduce=True,
res=None):
assert len(self.sizes) == 2
assert len(other.sizes) == 2
if res is None:
if reduce:
res_matrix = Matrix(n_rows, n_columns, self.value_type)
else:
res_matrix = Matrix(n_rows, n_columns, \
self.value_type.unreduced_type)
else:
res_matrix = res
@library.for_range_opt(n_rows)
def _(i):
@library.for_range_opt(n_columns)
def _(j):
col = column(other, j)
r = row(self, i)
if reduce:
res_matrix[i][j] = self.value_type.dot_product(r, col)
else:
entry = self.value_type.unreduced_dot_product(r, col)
res_matrix[i][j] = entry
return res_matrix
def plain_mul(self, other, res=None):
assert other.sizes[0] == self.sizes[1]
return self.budget_mul(other, self.sizes[0], lambda x, i: x[i], \
other.sizes[1], \
lambda x, j: [x[k][j] for k in range(len(x))],
res=res)
def mul_trans(self, other):
assert other.sizes[1] == self.sizes[1]
return self.budget_mul(other, self.sizes[0], lambda x, i: x[i], \
other.sizes[0], lambda x, j: x[j])
def trans_mul(self, other, reduce=True, res=None):
assert other.sizes[0] == self.sizes[0]
return self.budget_mul(other, self.sizes[1], \
lambda x, j: [x[k][j] for k in range(len(x))], \
other.sizes[1], \
lambda x, j: [x[k][j] for k in range(len(x))],
reduce=reduce, res=res)
def transpose(self):
assert len(self.sizes) == 2
res = Matrix(self.sizes[1], self.sizes[0], self.value_type)
@library.for_range_opt(self.sizes[1])
def _(i):
@library.for_range_opt(self.sizes[0])
def _(j):
res[i][j] = self[j][i]
return res
class MultiArray(SubMultiArray):
def __init__(self, sizes, value_type, debug=None, address=None):
if isinstance(address, Array):
self.array = address
else:
self.array = Array(reduce(operator.mul, sizes), \
value_type, address=address)
SubMultiArray.__init__(self, sizes, value_type, self.array.address, 0, \
debug=debug)
if len(sizes) < 2:
raise CompilerError('Use Array')
class Matrix(MultiArray):
def __init__(self, rows, columns, value_type, debug=None, address=None):
MultiArray.__init__(self, [rows, columns], value_type, debug=debug, \
address=address)
class VectorArray(object):
def __init__(self, length, value_type, vector_size, address=None):
self.array = Array(length * vector_size, value_type, address)
self.vector_size = vector_size
self.value_type = value_type
def __getitem__(self, index):
return self.value_type.load_mem(self.array.address + \
index * self.vector_size,
size=self.vector_size)
def __setitem__(self, index, value):
if value.size != self.vector_size:
raise CompilerError('vector size mismatch')
value.store_in_mem(self.array.address + index * self.vector_size)
class _mem(_number):
__add__ = lambda self,other: self.read() + other
__sub__ = lambda self,other: self.read() - other
__mul__ = lambda self,other: self.read() * other
__truediv__ = lambda self,other: self.read() / other
__mod__ = lambda self,other: self.read() % other
__pow__ = lambda self,other: self.read() ** other
    __neg__ = lambda self: -self.read()
__lt__ = lambda self,other: self.read() < other
__gt__ = lambda self,other: self.read() > other
__le__ = lambda self,other: self.read() <= other
__ge__ = lambda self,other: self.read() >= other
__eq__ = lambda self,other: self.read() == other
__ne__ = lambda self,other: self.read() != other
__and__ = lambda self,other: self.read() & other
__xor__ = lambda self,other: self.read() ^ other
__or__ = lambda self,other: self.read() | other
__lshift__ = lambda self,other: self.read() << other
__rshift__ = lambda self,other: self.read() >> other
__radd__ = lambda self,other: other + self.read()
__rsub__ = lambda self,other: other - self.read()
__rmul__ = lambda self,other: other * self.read()
__rtruediv__ = lambda self,other: other / self.read()
__rmod__ = lambda self,other: other % self.read()
__rand__ = lambda self,other: other & self.read()
__rxor__ = lambda self,other: other ^ self.read()
__ror__ = lambda self,other: other | self.read()
__iadd__ = lambda self,other: self.write(self.read() + other)
__isub__ = lambda self,other: self.write(self.read() - other)
__imul__ = lambda self,other: self.write(self.read() * other)
__idiv__ = lambda self,other: self.write(self.read() / other)
__imod__ = lambda self,other: self.write(self.read() % other)
__ipow__ = lambda self,other: self.write(self.read() ** other)
__iand__ = lambda self,other: self.write(self.read() & other)
__ixor__ = lambda self,other: self.write(self.read() ^ other)
__ior__ = lambda self,other: self.write(self.read() | other)
__ilshift__ = lambda self,other: self.write(self.read() << other)
__irshift__ = lambda self,other: self.write(self.read() >> other)
iadd = __iadd__
isub = __isub__
imul = __imul__
idiv = __idiv__
imod = __imod__
ipow = __ipow__
iand = __iand__
ixor = __ixor__
ior = __ior__
ilshift = __ilshift__
irshift = __irshift__
store_in_mem = lambda self,address: self.read().store_in_mem(address)
class MemValue(_mem):
__slots__ = ['last_write_block', 'reg_type', 'register', 'address', 'deleted']
@classmethod
def if_necessary(cls, value):
if util.is_constant_float(value):
return value
else:
return cls(value)
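    # Registers do not survive across basic blocks (e.g. between loop
    # iterations), so the value lives in memory; read() reloads it whenever
    # the current block differs from the last writing block.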
def __init__(self, value, address=None):
self.last_write_block = None
if isinstance(value, int):
self.value_type = regint
value = regint(value)
elif isinstance(value, MemValue):
self.value_type = value.value_type
else:
self.value_type = type(value)
self.deleted = False
if address is None:
self.address = self.value_type.malloc(1)
self.write(value)
else:
self.address = address
def delete(self):
self.value_type.free(self.address)
self.deleted = True
def check(self):
if self.deleted:
raise CompilerError('MemValue deleted')
def read(self):
self.check()
if program.curr_block != self.last_write_block:
self.register = library.load_mem(self.address, self.value_type)
self.last_write_block = program.curr_block
return self.register
def write(self, value):
self.check()
if isinstance(value, MemValue):
self.register = value.read()
elif isinstance(value, int):
self.register = self.value_type(value)
else:
self.register = value
if not isinstance(self.register, self.value_type):
raise CompilerError('Mismatch in register type, cannot write \
%s to %s' % (type(self.register), self.value_type))
self.register.store_in_mem(self.address)
self.last_write_block = program.curr_block
return self
def reveal(self):
return self.read().reveal()
less_than = lambda self,other,bit_length=None,security=None: \
self.read().less_than(other,bit_length,security)
greater_than = lambda self,other,bit_length=None,security=None: \
self.read().greater_than(other,bit_length,security)
less_equal = lambda self,other,bit_length=None,security=None: \
self.read().less_equal(other,bit_length,security)
greater_equal = lambda self,other,bit_length=None,security=None: \
self.read().greater_equal(other,bit_length,security)
equal = lambda self,other,bit_length=None,security=None: \
self.read().equal(other,bit_length,security)
not_equal = lambda self,other,bit_length=None,security=None: \
self.read().not_equal(other,bit_length,security)
pow2 = lambda self,*args,**kwargs: self.read().pow2(*args, **kwargs)
mod2m = lambda self,*args,**kwargs: self.read().mod2m(*args, **kwargs)
right_shift = lambda self,*args,**kwargs: self.read().right_shift(*args, **kwargs)
bit_decompose = lambda self,*args,**kwargs: self.read().bit_decompose(*args, **kwargs)
if_else = lambda self,*args,**kwargs: self.read().if_else(*args, **kwargs)
expand_to_vector = lambda self,*args,**kwargs: \
self.read().expand_to_vector(*args, **kwargs)
def __repr__(self):
return 'MemValue(%s,%d)' % (self.value_type, self.address)
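# Illustrative sketch (not part of the library): MemValue persists a value in
# memory and caches the loaded register per basic block, so it is the idiomatic
# way to carry state across control-flow constructs. Assuming the library
# module's for_range decorator and print_ln:
#
#   counter = MemValue(regint(0))
#   @library.for_range(10)
#   def _(i):
#       counter.iadd(1)          # read-modify-write through memory
#   library.print_ln('%s', counter.read())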
class MemFloat(_mem):
def __init__(self, *args):
value = sfloat(*args)
self.v = MemValue(value.v)
self.p = MemValue(value.p)
self.z = MemValue(value.z)
self.s = MemValue(value.s)
def write(self, *args):
value = sfloat(*args)
self.v.write(value.v)
self.p.write(value.p)
self.z.write(value.z)
self.s.write(value.s)
def read(self):
return sfloat(self.v, self.p, self.z, self.s)
class MemFix(_mem):
def __init__(self, *args):
arg_type = type(*args)
if arg_type == sfix:
value = sfix(*args)
elif arg_type == cfix:
value = cfix(*args)
else:
raise CompilerError('MemFix init argument error')
self.reg_type = value.v.reg_type
self.v = MemValue(value.v)
def write(self, *args):
value = sfix(*args)
self.v.write(value.v)
def reveal(self):
return cfix(self.v.reveal())
def read(self):
val = self.v.read()
if isinstance(val, sint):
return sfix(val)
else:
return cfix(val)
def getNamedTupleType(*names):
class NamedTuple(object):
class NamedTupleArray(object):
def __init__(self, size, t):
from . import types
self.arrays = [types.Array(size, t) for i in range(len(names))]
def __getitem__(self, index):
return NamedTuple(array[index] for array in self.arrays)
def __setitem__(self, index, item):
for array,value in zip(self.arrays, item):
array[index] = value
@classmethod
def get_array(cls, size, t):
return cls.NamedTupleArray(size, t)
def __init__(self, *args):
if len(args) == 1:
args = args[0]
for name, value in zip(names, args):
self.__dict__[name] = value
def __iter__(self):
for name in names:
yield self.__dict__[name]
def __add__(self, other):
return NamedTuple(i + j for i,j in zip(self, other))
def __sub__(self, other):
return NamedTuple(i - j for i,j in zip(self, other))
def __xor__(self, other):
return NamedTuple(i ^ j for i,j in zip(self, other))
def __mul__(self, other):
return NamedTuple(other * i for i in self)
__rmul__ = __mul__
__rxor__ = __xor__
def reveal(self):
            return type(self)(x.reveal() for x in self)
return NamedTuple
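# Hedged usage sketch of the factory above (names and values are illustrative):
#
#   Point = getNamedTupleType('x', 'y')
#   p = Point(sint(1), sint(2))
#   q = Point(sint(3), sint(4))
#   s = p + q                    # element-wise add -> Point-like tuple
#   x, y = s                     # iterates in declaration order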
from . import library
| __le__ |
mark_tasks.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Marks tasks APIs."""
import datetime
from typing import Iterable
from sqlalchemy import or_
from airflow.jobs import BackfillJob
from airflow.models import BaseOperator, DagRun, TaskInstance
from airflow.operators.subdag_operator import SubDagOperator
from airflow.utils import timezone
from airflow.utils.db import provide_session
from airflow.utils.state import State
def _create_dagruns(dag, execution_dates, state, run_id_template):
"""
Infers from the dates which dag runs need to be created and does so.
:param dag: the dag to create dag runs for
:param execution_dates: list of execution dates to evaluate
:param state: the state to set the dag run to
    :param run_id_template: the template for the run id, formatted with the execution date
:return: newly created and existing dag runs for the execution dates supplied
"""
# find out if we need to create any dag runs
dag_runs = DagRun.find(dag_id=dag.dag_id, execution_date=execution_dates)
dates_to_create = list(set(execution_dates) - {dag_run.execution_date for dag_run in dag_runs})
for date in dates_to_create:
dag_run = dag.create_dagrun(
run_id=run_id_template.format(date.isoformat()),
execution_date=date,
start_date=timezone.utcnow(),
external_trigger=False,
state=state,
)
dag_runs.append(dag_run)
return dag_runs
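# Hedged usage sketch (dates are hypothetical): ensure runs exist for two
# execution dates and get both new and existing runs back in one list, e.g.
#
#   runs = _create_dagruns(dag, [date_1, date_2], state=State.RUNNING,
#                          run_id_template=BackfillJob.ID_FORMAT_PREFIX)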
@provide_session
def set_state(
tasks, # type: Iterable[BaseOperator]
execution_date, # type: datetime.datetime
upstream=False,
downstream=False,
future=False,
past=False,
state=State.SUCCESS,
commit=False,
session=None): # pylint: disable=too-many-arguments,too-many-locals
"""
Set the state of a task instance and if needed its relatives. Can set state
for future tasks (calculated from execution_date) and retroactively
for past tasks. Will verify integrity of past dag runs in order to create
tasks that did not exist. It will not create dag runs that are missing
    on the schedule (but it will for subdag dag runs if needed).
    :param tasks: the iterable of tasks from which to work; each task's dag needs to be set
:param execution_date: the execution date from which to start looking
:param upstream: Mark all parents (upstream tasks)
:param downstream: Mark all siblings (downstream tasks) of task_id, including SubDags
:param future: Mark all future tasks on the interval of the dag up until
last execution date.
:param past: Retroactively mark all tasks starting from start_date of the DAG
:param state: State to which the tasks need to be set
:param commit: Commit tasks to be altered to the database
:param session: database session
:return: list of tasks that have been created and updated
"""
if not tasks:
return []
if not timezone.is_localized(execution_date):
raise ValueError("Received non-localized date {}".format(execution_date))
task_dags = {task.dag for task in tasks}
if len(task_dags) > 1:
raise ValueError("Received tasks from multiple DAGs: {}".format(task_dags))
dag = next(iter(task_dags))
if dag is None:
raise ValueError("Received tasks with no DAG")
dates = get_execution_dates(dag, execution_date, future, past)
task_ids = list(find_task_relatives(tasks, downstream, upstream))
confirmed_dates = verify_dag_run_integrity(dag, dates)
sub_dag_run_ids = get_subdag_runs(dag, session, state, task_ids, commit, confirmed_dates)
# now look for the task instances that are affected
qry_dag = get_all_dag_task_query(dag, session, state, task_ids, confirmed_dates)
if commit:
tis_altered = qry_dag.with_for_update().all()
if sub_dag_run_ids:
qry_sub_dag = all_subdag_tasks_query(sub_dag_run_ids, session, state, confirmed_dates)
tis_altered += qry_sub_dag.with_for_update().all()
for task_instance in tis_altered:
task_instance.state = state
else:
tis_altered = qry_dag.all()
if sub_dag_run_ids:
qry_sub_dag = all_subdag_tasks_query(sub_dag_run_ids, session, state, confirmed_dates)
tis_altered += qry_sub_dag.all()
return tis_altered
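# Hedged example call (task id and date are hypothetical): mark a task and all
# of its downstream relatives as SUCCESS for a single execution date:
#
#   altered = set_state(tasks=[dag.get_task('load')],
#                       execution_date=utc_exec_date,
#                       downstream=True, state=State.SUCCESS, commit=True)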
# Flake and pylint disagree about correct indents here
def all_subdag_tasks_query(sub_dag_run_ids, session, state, confirmed_dates): # noqa: E123
"""Get *all* tasks of the sub dags"""
qry_sub_dag = session.query(TaskInstance).\
filter(
TaskInstance.dag_id.in_(sub_dag_run_ids),
TaskInstance.execution_date.in_(confirmed_dates) # noqa: E123
).\
filter(
or_(
TaskInstance.state.is_(None),
TaskInstance.state != state
)
) # noqa: E123
return qry_sub_dag
def get_all_dag_task_query(dag, session, state, task_ids, confirmed_dates): # noqa: E123
|
def get_subdag_runs(dag, session, state, task_ids, commit, confirmed_dates):
"""Go through subdag operators and create dag runs. We will only work
    within the scope of the subdag. We won't propagate to the parent dag,
but we will propagate from parent to subdag.
"""
dags = [dag]
sub_dag_ids = []
while dags:
current_dag = dags.pop()
for task_id in task_ids:
if not current_dag.has_task(task_id):
continue
current_task = current_dag.get_task(task_id)
if isinstance(current_task, SubDagOperator):
# this works as a kind of integrity check
# it creates missing dag runs for subdag operators,
# maybe this should be moved to dagrun.verify_integrity
dag_runs = _create_dagruns(current_task.subdag,
execution_dates=confirmed_dates,
state=State.RUNNING,
run_id_template=BackfillJob.ID_FORMAT_PREFIX)
verify_dagruns(dag_runs, commit, state, session, current_task)
dags.append(current_task.subdag)
sub_dag_ids.append(current_task.subdag.dag_id)
return sub_dag_ids
def verify_dagruns(dag_runs, commit, state, session, current_task):
"""Verifies integrity of dag_runs.
:param dag_runs: dag runs to verify
:param commit: whether dag runs state should be updated
:param state: state of the dag_run to set if commit is True
:param session: session to use
:param current_task: current task
:return:
"""
for dag_run in dag_runs:
dag_run.dag = current_task.subdag
dag_run.verify_integrity()
if commit:
dag_run.state = state
session.merge(dag_run)
def verify_dag_run_integrity(dag, dates):
"""Verify the integrity of the dag runs in case a task was added or removed
set the confirmed execution dates as they might be different
from what was provided
"""
confirmed_dates = []
dag_runs = DagRun.find(dag_id=dag.dag_id, execution_date=dates)
for dag_run in dag_runs:
dag_run.dag = dag
dag_run.verify_integrity()
confirmed_dates.append(dag_run.execution_date)
return confirmed_dates
def find_task_relatives(tasks, downstream, upstream):
"""Yield task ids and optionally ancestor and descendant ids."""
for task in tasks:
yield task.task_id
if downstream:
for relative in task.get_flat_relatives(upstream=False):
yield relative.task_id
if upstream:
for relative in task.get_flat_relatives(upstream=True):
yield relative.task_id
def get_execution_dates(dag, execution_date, future, past):
"""Returns dates of DAG execution"""
latest_execution_date = dag.latest_execution_date
if latest_execution_date is None:
raise ValueError("Received non-localized date {}".format(execution_date))
# determine date range of dag runs and tasks to consider
end_date = latest_execution_date if future else execution_date
if 'start_date' in dag.default_args:
start_date = dag.default_args['start_date']
elif dag.start_date:
start_date = dag.start_date
else:
start_date = execution_date
start_date = execution_date if not past else start_date
if dag.schedule_interval == '@once':
dates = [start_date]
elif not dag.schedule_interval:
        # If schedule_interval is None, we need to look at existing DagRuns when the user
        # wants future or past runs.
dag_runs = dag.get_dagruns_between(start_date=start_date, end_date=end_date)
dates = sorted({d.execution_date for d in dag_runs})
else:
dates = dag.date_range(start_date=start_date, end_date=end_date)
return dates
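# Worked example of the rules above (dates hypothetical): for a daily DAG,
# future=True extends the range to execution_date..latest_execution_date, and
# past=True moves the start back to the DAG's start_date; '@once' collapses to
# [start_date], and a None schedule_interval falls back to the execution dates
# of existing DagRuns between start_date and end_date.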
@provide_session
def _set_dag_run_state(dag_id, execution_date, state, session=None):
"""
Helper method that set dag run state in the DB.
:param dag_id: dag_id of target dag run
:param execution_date: the execution date from which to start looking
:param state: target state
:param session: database session
"""
dag_run = session.query(DagRun).filter(
DagRun.dag_id == dag_id,
DagRun.execution_date == execution_date
).one()
dag_run.state = state
if state == State.RUNNING:
dag_run.start_date = timezone.utcnow()
dag_run.end_date = None
else:
dag_run.end_date = timezone.utcnow()
session.merge(dag_run)
@provide_session
def set_dag_run_state_to_success(dag, execution_date, commit=False, session=None):
"""
Set the dag run for a specific execution date and its task instances
to success.
:param dag: the DAG of which to alter state
:param execution_date: the execution date from which to start looking
:param commit: commit DAG and tasks to be altered to the database
:param session: database session
:return: If commit is true, list of tasks that have been updated,
otherwise list of tasks that will be updated
:raises: ValueError if dag or execution_date is invalid
"""
if not dag or not execution_date:
return []
# Mark the dag run to success.
if commit:
_set_dag_run_state(dag.dag_id, execution_date, State.SUCCESS, session)
# Mark all task instances of the dag run to success.
for task in dag.tasks:
task.dag = dag
return set_state(tasks=dag.tasks, execution_date=execution_date,
state=State.SUCCESS, commit=commit, session=session)
@provide_session
def set_dag_run_state_to_failed(dag, execution_date, commit=False, session=None):
"""
Set the dag run for a specific execution date and its running task instances
to failed.
:param dag: the DAG of which to alter state
:param execution_date: the execution date from which to start looking
:param commit: commit DAG and tasks to be altered to the database
:param session: database session
:return: If commit is true, list of tasks that have been updated,
otherwise list of tasks that will be updated
:raises: AssertionError if dag or execution_date is invalid
"""
if not dag or not execution_date:
return []
# Mark the dag run to failed.
if commit:
_set_dag_run_state(dag.dag_id, execution_date, State.FAILED, session)
# Mark only RUNNING task instances.
task_ids = [task.task_id for task in dag.tasks]
tis = session.query(TaskInstance).filter(
TaskInstance.dag_id == dag.dag_id,
TaskInstance.execution_date == execution_date,
TaskInstance.task_id.in_(task_ids)).filter(TaskInstance.state == State.RUNNING)
task_ids_of_running_tis = [task_instance.task_id for task_instance in tis]
tasks = []
for task in dag.tasks:
if task.task_id not in task_ids_of_running_tis:
continue
task.dag = dag
tasks.append(task)
return set_state(tasks=tasks, execution_date=execution_date,
state=State.FAILED, commit=commit, session=session)
@provide_session
def set_dag_run_state_to_running(dag, execution_date, commit=False, session=None):
"""
Set the dag run for a specific execution date to running.
:param dag: the DAG of which to alter state
:param execution_date: the execution date from which to start looking
:param commit: commit DAG and tasks to be altered to the database
:param session: database session
:return: If commit is true, list of tasks that have been updated,
otherwise list of tasks that will be updated
"""
res = []
if not dag or not execution_date:
return res
# Mark the dag run to running.
if commit:
_set_dag_run_state(dag.dag_id, execution_date, State.RUNNING, session)
# To keep the return type consistent with the other similar functions.
return res
| """Get all tasks of the main dag that will be affected by a state change"""
qry_dag = session.query(TaskInstance).\
filter(
TaskInstance.dag_id == dag.dag_id,
TaskInstance.execution_date.in_(confirmed_dates),
TaskInstance.task_id.in_(task_ids) # noqa: E123
).\
filter(
or_(
TaskInstance.state.is_(None),
TaskInstance.state != state
)
)
return qry_dag |
index.js | import app from './src/boot.js' |
||
core.rs | // Copyright 2015, Paul Osborne <[email protected]>
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use byteorder::{ByteOrder, LittleEndian};
use std::error::Error;
/// Interface to an I2C Slave Device from an I2C Master
///
/// Typical implementations will store state with references to the bus
/// in use and the address of the slave device. The trait is based on the
/// Linux i2cdev interface.
pub trait I2CDevice {
type Error: Error;
/// Read data from the device to fill the provided slice
fn read(&mut self, data: &mut [u8]) -> Result<(), Self::Error>;
/// Write the provided buffer to the device
fn write(&mut self, data: &[u8]) -> Result<(), Self::Error>;
/// This sends a single bit to the device, at the place of the Rd/Wr bit
fn smbus_write_quick(&mut self, bit: bool) -> Result<(), Self::Error>;
/// Read a single byte from a device, without specifying a device register
///
/// Some devices are so simple that this interface is enough; for
/// others, it is a shorthand if you want to read the same register as in
/// the previous SMBus command.
fn smbus_read_byte(&mut self) -> Result<u8, Self::Error> {
let mut buf = [0_u8];
try!(self.read(&mut buf));
Ok(buf[0])
}
/// Write a single byte to a device, without specifying a device register
///
    /// This is the opposite operation of smbus_read_byte. As with read_byte,
/// no register is specified.
fn smbus_write_byte(&mut self, value: u8) -> Result<(), Self::Error> {
self.write(&mut [value])
}
/// Read a single byte from a device, from a designated register
///
/// The register is specified through the Comm byte.
fn smbus_read_byte_data(&mut self, register: u8) -> Result<u8, Self::Error> {
try!(self.smbus_write_byte(register));
self.smbus_read_byte()
}
/// Write a single byte to a specific register on a device
///
/// The register is specified through the Comm byte.
fn | (&mut self, register: u8, value: u8) -> Result<(), Self::Error> {
self.write(&mut [register, value])
}
/// Read 2 bytes from a given register on a device (lsb first)
fn smbus_read_word_data(&mut self, register: u8) -> Result<u16, Self::Error> {
let mut buf: [u8; 2] = [0x00; 2];
try!(self.smbus_write_byte(register));
try!(self.read(&mut buf));
Ok(LittleEndian::read_u16(&buf))
}
/// Write 2 bytes to a given register on a device (lsb first)
fn smbus_write_word_data(&mut self, register: u8, value: u16) -> Result<(), Self::Error> {
let mut buf: [u8; 3] = [register, 0, 0];
LittleEndian::write_u16(&mut buf[1..], value);
self.write(&buf)
}
/// Select a register, send 16 bits of data to it, and read 16 bits of data
fn smbus_process_word(&mut self, register: u8, value: u16) -> Result<u16, Self::Error> {
let mut buf: [u8; 2] = [0x00; 2];
try!(self.smbus_write_word_data(register, value));
try!(self.read(&mut buf));
Ok(LittleEndian::read_u16(&buf))
}
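    // A hedged usage sketch (register address 0x05 is hypothetical): any
    // implementor of this trait can read a little-endian 16-bit value via the
    // word helper above, e.g.
    //
    //     fn read_temp<D: I2CDevice>(dev: &mut D) -> Result<u16, D::Error> {
    //         dev.smbus_read_word_data(0x05)
    //     }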
/// Read a block of up to 32 bytes from a device
///
/// The actual number of bytes available to read is returned in the count
/// byte. This code returns a correctly sized vector containing the
/// count bytes read from the device.
fn smbus_read_block_data(&mut self, register: u8) -> Result<Vec<u8>, Self::Error>;
/// Read a block of up to 32 bytes from a device
///
    /// Uses read_i2c_block_data instead of read_block_data.
fn smbus_read_i2c_block_data(&mut self, register: u8, len: u8) -> Result<Vec<u8>, Self::Error>;
/// Write a block of up to 32 bytes to a device
///
/// The opposite of the Block Read command, this writes up to 32 bytes to
/// a device, to a designated register that is specified through the
/// Comm byte. The amount of data is specified in the Count byte.
fn smbus_write_block_data(&mut self, register: u8, values: &[u8]) -> Result<(), Self::Error>;
/// Write a block of up to 32 bytes from a device
///
    /// Uses write_i2c_block_data instead of write_block_data.
fn smbus_write_i2c_block_data(&mut self, register: u8, values: &[u8]) -> Result<(), Self::Error>;
/// Select a register, send 1 to 31 bytes of data to it, and reads
/// 1 to 31 bytes of data from it.
fn smbus_process_block(&mut self, register: u8, values: &[u8]) -> Result<Vec<u8>, Self::Error>;
}
| smbus_write_byte_data |
constants.js | 'use strict';
module.exports = {
// Versioning used for the story files.
// The story files are the stories written by authors.
STORY_FILES_VERSION: 1, | STORY_OBJECT_VERSION: 1,
// Maximum recursion allowed for recursive functions.
// To prevent bugs from killing the program.
MAX_RECURSION: 5,
// How close input has to be to a known accepted phrase
// to be considered a match or a possible suggestion.
// 0.9 == 90% of input has to match a known phrase
RATING_MATCH: 0.9,
RATING_SUGGESTION: 0.5,
// Directories.
DIRECTORY_ACTIONS: 'actions',
DIRECTORY_ENTITIES: 'entities',
DIRECTORY_ROOT: '',
// Root config file name.
FILE_NAME_STORY: 'story',
// Keys.
KEY_ACTION: 'action',
KEY_ACTIONS: 'actions',
KEY_AUTHOR: 'author',
KEY_CHANGE: 'change',
KEY_DEFAULT: 'default',
KEY_DESCRIBE: 'describe',
KEY_DESCRIPTION: 'description',
KEY_DISABLE: 'disable',
KEY_ENABLE: 'enable',
KEY_ENTITIES: 'entities',
KEY_ENTITY: 'entity',
KEY_ENTITY_PLACEHOLDER: '@entity',
KEY_FOR: 'for',
KEY_IS: 'is',
KEY_MESSAGE: 'message',
KEY_NONE: '.none',
KEY_PROPERTY_PLACEHOLDER: '@property',
KEY_REVERT: '.revert',
KEY_RULES: 'rules',
KEY_SYNONYMS: 'synonyms',
KEY_TEMPLATES: 'templates',
KEY_TEXT: 'text',
KEY_TITLE: 'title',
KEY_VALUE: 'value',
KEY_VALUE_PLACEHOLDER: '@value',
KEY_VERSION: 'version',
KEY_WHEN: 'when',
// File types.
TYPE_DOT: 'dot',
TYPE_MARKDOWN: 'markdown',
TYPE_YAML: 'yaml',
// Markdown parser strings.
MD_MARKDOWN: 'markdown',
MD_HEADER: 'header',
MD_PARAGRAPH: 'para',
// Separator for paths.
PATH_SEP: '.'
} |
// Versioning used for the story object.
// The story object is the internal representation. |
test_unit_test_inspect.py | #!/usr/bin/env python
#-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
import os.path as path
from bes.fs.file_util import file_util
from bes.fs.temp_file import temp_file
from bes.testing.unit_test import unit_test
from bes.testing.framework import unit_test_inspect as UTI
from bes.testing.unit_test_skip import raise_skip
class test_unit_test_inspect(unit_test):
@classmethod
def setUpClass(clazz):
raise_skip('broken')
def | (self):
content = '''
import unittest
class test_apple_fixture(unittest.TestCase):
def test_foo(self):
self.assertEqual( 6, 3 + 3 )
def test_bar(self):
self.assertEqual( 7, 3 + 4 )
'''
filename = temp_file.make_temp_file(content = content, suffix = '.py')
self.assertEqual( [
( filename, 'test_apple_fixture', 'test_foo' ),
( filename, 'test_apple_fixture', 'test_bar' ),
],
UTI.inspect_file(filename) )
file_util.remove(filename)
def test_inspect_file_not_unit_test(self):
content = '''
class test_apple_fixture(object):
def test_foo(self):
pass
def test_bar(self):
pass
'''
filename = temp_file.make_temp_file(content = content, suffix = '.py')
self.assertEqual( [], UTI.inspect_file(filename) )
file_util.remove(filename)
  def test_inspect_file_disabled(self):
content = '''
import unittest
class test_apple_fixture(unittest.TestCase):
def xtest_foo(self):
self.assertEqual( 6, 3 + 3 )
def xtest_bar(self):
self.assertEqual( 7, 3 + 4 )
'''
filename = temp_file.make_temp_file(content = content, suffix = '.py')
self.assertEqual( [
],
UTI.inspect_file(filename) )
file_util.remove(filename)
def doesnt_work_test_inspect_file_TestCase_subclass(self):
content = '''
import unittest
class unit_super(unittest.TestCase):
_x = 5
class test_apple_fixture(unit_super):
def test_foo(self):
self.assertEqual( 6, 3 + 3 )
def test_bar(self):
self.assertEqual( 7, 3 + 4 )
class something(unittest.TestCase):
pass
'''
filename = temp_file.make_temp_file(content = content, suffix = '.py')
self.assertEqual( [
( filename, 'test_apple_fixture', 'test_foo' ),
( filename, 'test_apple_fixture', 'test_bar' ),
],
UTI.inspect_file(filename) )
file_util.remove(filename)
def test_inspect_file_unit_test(self):
content = '''
from bes.testing.unit_test import unit_test
class test_apple_fixture(unit_test):
def test_foo(self):
self.assertEqual( 6, 3 + 3 )
def test_bar(self):
self.assertEqual( 7, 3 + 4 )
'''
filename = temp_file.make_temp_file(content = content, suffix = '.py')
self.assertEqual( [
( filename, 'test_apple_fixture', 'test_foo' ),
( filename, 'test_apple_fixture', 'test_bar' ),
],
UTI.inspect_file(filename) )
file_util.remove(filename)
if __name__ == '__main__':
unit_test.main()
| test_inspect_file |
__init__.py | structure and defines the data to show when the swagger is activated.
It does not contain the html and css files used to create the page,
only the underlying structure. The html and css can be found in the static module.
""" | """
swagger module -
A package defining the swagger features. This module creates the swagger |
|
trigger.helper.ts | import { Injectable, Inject } from '@angular/core';
import { fromEvent as observableFromEvent } from 'rxjs/observable/fromEvent';
import { empty as observableEmpty } from 'rxjs/observable/empty';
import { filter } from 'rxjs/operators/filter';
import { delay } from 'rxjs/operators/delay';
import { takeWhile } from 'rxjs/operators/takeWhile';
import { debounceTime } from 'rxjs/operators/debounceTime';
import { switchMap } from 'rxjs/operators/switchMap';
import { repeat } from 'rxjs/operators/repeat';
import { takeUntil } from 'rxjs/operators/takeUntil';
import { NB_DOCUMENT } from '../../../theme.options';
import { NbPopoverMode, NbPopoverTrigger } from './model';
/**
* Describes popover triggers strategies based on popover {@link NbPopoverMode} mode.
* */
const NB_TRIGGERS = {
/**
* Creates toggle and close events streams based on popover {@link NbPopoverMode#CLICK} mode.
   * Fires toggle event when a click is performed on the host element.
   * Fires close event when a click is performed on the document but
   * not on the host or the popover container, provided the container is rendered.
*
* @param host {HTMLElement} popover host element.
* @param getContainer {Function} popover container getter.
* @param document {Document} document ref.
*
* @return {NbPopoverTrigger} open and close events streams.
* */
[NbPopoverMode.CLICK](host: HTMLElement, getContainer: Function, document: Document): NbPopoverTrigger {
return {
open: observableEmpty(),
close: observableFromEvent<Event>(document, 'click')
.pipe(
filter(event => !host.contains(event.target as Node)
&& getContainer()
&& !getContainer().location.nativeElement.contains(event.target)),
),
toggle: observableFromEvent(host, 'click'),
};
},
/**
* Creates open and close events streams based on popover {@link NbPopoverMode#HOVER} mode.
   * Fires open event when the mouse hovers over the host element and stays there for at least 100 milliseconds.
   * Fires close event when the mouse leaves the host element and stays out of both the host and the popover container.
*
* @param host {HTMLElement} popover host element.
* @param getContainer {Function} popover container getter.
* @param document {Document} document ref.
*
* @return {NbPopoverTrigger} open and close events streams.
* */
[NbPopoverMode.HOVER](host: HTMLElement, getContainer: Function, document: Document): NbPopoverTrigger {
return {
open: observableFromEvent<Event>(host, 'mouseenter')
.pipe(
delay(100),
takeUntil(observableFromEvent(host, 'mouseleave')),
repeat(),
),
close: observableFromEvent<Event>(host, 'mouseleave')
.pipe(
switchMap(() => observableFromEvent<Event>(document, 'mousemove')
.pipe(
debounceTime(100),
takeWhile(() => !!getContainer()),
filter(event => !host.contains(event.target as Node)
&& !getContainer().location.nativeElement.contains(event.target),
),
),
),
),
toggle: observableEmpty(), |
/**
* Creates open and close events streams based on popover {@link NbPopoverMode#HOVER} mode.
   * Fires open event when the mouse hovers over the host element and stays there for at least 100 milliseconds.
   * Fires close event when the mouse leaves the host element.
*
* @param host {HTMLElement} popover host element.
*
* @return {NbPopoverTrigger} open and close events streams.
* */
[NbPopoverMode.HINT](host: HTMLElement): NbPopoverTrigger {
return {
open: observableFromEvent<Event>(host, 'mouseenter')
.pipe(
delay(100),
takeUntil(observableFromEvent(host, 'mouseleave')),
repeat(),
),
close: observableFromEvent(host, 'mouseleave'),
toggle: observableEmpty(),
}
},
};
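// Hedged usage sketch for NbTriggerHelper below (hostEl, containerRef and the
// show/hide handlers are assumptions, not part of this file):
//
//   const trigger = triggerHelper.createTrigger(hostEl, () => containerRef, NbPopoverMode.HOVER);
//   trigger.open.subscribe(() => show());
//   trigger.close.subscribe(() => hide());
//   trigger.toggle.subscribe(() => toggle());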
@Injectable()
export class NbTriggerHelper {
constructor(@Inject(NB_DOCUMENT) private document) {
}
/**
* Creates open and close events streams based on popover {@link NbPopoverMode} mode.
*
* @param host {HTMLElement} popover host element.
* @param getContainer {Function} popover container getter.
* Getter required because listen can be called when container isn't initialized.
* @param mode {NbPopoverMode} describes container triggering strategy.
*
* @return {NbPopoverTrigger} open and close events streams.
* */
createTrigger(host: HTMLElement, getContainer: Function, mode: NbPopoverMode): NbPopoverTrigger {
const createTrigger = NB_TRIGGERS[mode];
return createTrigger.call(NB_TRIGGERS, host, getContainer, this.document);
}
} | }
}, |
FisherFaceRecognizer.py | from SimpleCV import *
import time
"""
This is an example of how to use FaceRecognizer to recognize the gender
of a person.
"""
def identifyGender():
|
identifyGender()
| f = FaceRecognizer()
cam = Camera()
img = cam.getImage()
cascade = LAUNCH_PATH + "/" + "Features/HaarCascades/face.xml"
feat = img.findHaarFeatures(cascade)
if feat:
crop_image = feat.sortArea()[-1].crop()
feat.sortArea()[-1].draw()
f.load(LAUNCH_PATH + "/" + "Features/FaceRecognizerData/GenderData.xml")
w, h = f.imageSize
crop_image = crop_image.resize(w, h)
label, confidence = f.predict(crop_image)
print label
if label == 0:
img.drawText("Female", fontsize=48)
else:
img.drawText("Male", fontsize=48)
img.show()
time.sleep(4) |
_Cisco_IOS_XR_shellutil_filesystem_oper.py | import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'FileSystem.Node.FileSystem_' : {
'meta_info' : _MetaInfoClass('FileSystem.Node.FileSystem_',
False,
[
_MetaInfoClassMember('flags', ATTRIBUTE, 'str' , None, None,
[], [],
''' Flags of file system
''',
'flags',
'Cisco-IOS-XR-shellutil-filesystem-oper', False),
_MetaInfoClassMember('free', ATTRIBUTE, 'str' , None, None,
[], [],
''' Free space in the file system in bytes
''',
'free',
'Cisco-IOS-XR-shellutil-filesystem-oper', False),
_MetaInfoClassMember('prefixes', ATTRIBUTE, 'str' , None, None,
[], [],
''' Prefixes of file system
''',
'prefixes',
'Cisco-IOS-XR-shellutil-filesystem-oper', False),
_MetaInfoClassMember('size', ATTRIBUTE, 'str' , None, None,
[], [],
''' Size of the file system in bytes
''',
'size',
'Cisco-IOS-XR-shellutil-filesystem-oper', False),
_MetaInfoClassMember('type', ATTRIBUTE, 'str' , None, None,
[], [],
''' Type of file system
''',
'type',
'Cisco-IOS-XR-shellutil-filesystem-oper', False),
],
'Cisco-IOS-XR-shellutil-filesystem-oper',
'file-system',
_yang_ns._namespaces['Cisco-IOS-XR-shellutil-filesystem-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_shellutil_filesystem_oper'
),
},
'FileSystem.Node' : {
'meta_info' : _MetaInfoClass('FileSystem.Node',
False,
[
_MetaInfoClassMember('node-name', ATTRIBUTE, 'str' , None, None,
[], ['([a-zA-Z0-9_]*\\d+/){1,2}([a-zA-Z0-9_]*\\d+)'],
''' Node name
''',
'node_name',
'Cisco-IOS-XR-shellutil-filesystem-oper', True),
_MetaInfoClassMember('file-system', REFERENCE_LIST, 'FileSystem_' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_shellutil_filesystem_oper', 'FileSystem.Node.FileSystem_',
[], [],
''' Available file systems
''',
'file_system',
'Cisco-IOS-XR-shellutil-filesystem-oper', False),
],
'Cisco-IOS-XR-shellutil-filesystem-oper',
'node',
_yang_ns._namespaces['Cisco-IOS-XR-shellutil-filesystem-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_shellutil_filesystem_oper'
),
},
'FileSystem' : {
'meta_info' : _MetaInfoClass('FileSystem',
False,
[
_MetaInfoClassMember('node', REFERENCE_LIST, 'Node' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_shellutil_filesystem_oper', 'FileSystem.Node',
[], [],
''' Node ID
''',
'node',
'Cisco-IOS-XR-shellutil-filesystem-oper', False),
],
'Cisco-IOS-XR-shellutil-filesystem-oper',
'file-system',
_yang_ns._namespaces['Cisco-IOS-XR-shellutil-filesystem-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_shellutil_filesystem_oper'
),
},
} | _meta_table['FileSystem.Node.FileSystem_']['meta_info'].parent =_meta_table['FileSystem.Node']['meta_info']
_meta_table['FileSystem.Node']['meta_info'].parent =_meta_table['FileSystem']['meta_info'] |
|
AmendTicket.js | import React from 'react';
import { StyleSheet, TouchableOpacity, View, TextInput, KeyboardAvoidingView } from 'react-native';
import { Accordion, Button, Container, Content, Input, Item, StyleProvider, Text } from 'native-base';
import DateTimePicker from 'react-native-modal-datetime-picker';
import Icon from 'react-native-vector-icons/MaterialIcons';
import moment from 'moment';
import getTheme from '../../native-base-theme/components';
import platform from '../../native-base-theme/variables/platform';
import GlobalHeader from '../../components/GlobalHeader';
import ip from '../../server/keys/ipstore';
import colors from '../../constants/Colors';
import { connect } from 'react-redux';
import { amendTicket } from '../../redux/actions/ticketAction';
import { postRequestAuthorized } from '../../API';
import SummaryRow from '../../components/SummaryRow';
import CustomDateTimePicker from '../../components/CustomDateTimePicker';
import CustomInput from '../../components/CustomInput';
class AmendTicket extends React.Component {
static navigationOptions = {
header: null
};
state = {
isLoadingComplete: false,
date: null,
time: null,
numWheelchair: null,
errors: [],
wheelchairFocused: false
};
onSubmit = () => {
const ticket = this.props.navigation.state.params.ticket;
const data = {
ticketId: ticket.id,
date: this.state.date === null ? moment(ticket.date).format('YYYY-MM-DD HH:mm:ss') : this.state.date,
time: this.state.time === null ? moment(ticket.time).format('YYYY-MM-DD HH:mm:ss') : this.state.time,
numWheelchair: this.state.numWheelchair === null ? ticket.numWheelchairs : this.state.numWheelchair,
numPassenger: ticket.numPassengers
};
// Send the above data to the server
postRequestAuthorized(`http://${ip}:3000/amendTicket`, data)
.then((responseJSON) => {
switch (responseJSON.status) {
//Success
case 10:
this.props.amendTicket(data);
this.props.navigation.navigate('Ticket');
break;
//Input Validation Failed
case 0:
this.setState({
errors: this.parseErrors(responseJSON.errors)
});
break;
}
})
.catch((error) => console.log(error));
};
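	// Expected response shapes, inferred from the switch above (field names
	// are assumptions about the server contract, not defined in this file):
	//   { status: 10 }                          -> success, ticket amended
	//   { status: 0, errors: [{ msg: '...' }] } -> validation failure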
// If there are errors, parse them and return them in an array
parseErrors = (errorList) => {
var errors = {
title: 'Errors',
content: ''
};
for (var i = 0; i < errorList.length; i++) {
errors.content += errorList[i].msg + '\n';
}
return [errors]; | this.props.navigation.navigate('Details');
};
// Functionality to show/hide the date picker and to set the state
_showDatePicker = () => this.setState({ isDatePickerVisible: true });
_hideDatePicker = () => this.setState({ isDatePickerVisible: false });
_handleDatePicked = (newDate) => {
this.setState({ date: moment(newDate).format('YYYY-MM-DD HH:mm:ss') });
this._hideDatePicker();
};
// Functionality to show/hide the time picker and to set the state
_showTimePicker = () => this.setState({ isTimePickerVisible: true });
_hideTimePicker = () => this.setState({ isTimePickerVisible: false });
_handleTimePicked = (newTime) => {
this.setState({ time: moment(newTime).format('YYYY-MM-DD HH:mm:ss') });
this._hideTimePicker();
};
render() {
const data = this.props.navigation.state.params.ticket;
return (
<StyleProvider style={getTheme(platform)}>
<Container>
<GlobalHeader
type={3}
header="Amend Ticket Details"
navigateTo={this.navigateTo}
isBackButtonActive={1}
/>
{/* Display error messages for validation in an accordion */}
<KeyboardAvoidingView behavior="padding" style={{ flex: 1 }}>
<Content style={styles.content}>
{this.state.errors &&
!!this.state.errors.length && (
<Accordion
dataArray={this.state.errors}
icon="add"
expandedIcon="remove"
contentStyle={styles.errorStyle}
expanded={0}
/>
)}
{this.state.error && (
<Accordion
dataArray={this.state.error}
icon="add"
expandedIcon="remove"
contentStyle={styles.errorStyle}
expanded={0}
/>
)}
{/* Summary of the current ticket */}
<View style={styles.contentContainer}>
<View style={styles.summaryCard}>
<View style={styles.cardContent}>
<View style={styles.details}>
<View>
<Text style={styles.header2}>CURRENT TICKET DETAILS</Text>
<SummaryRow iconName="date-range" value={moment(data.date).format('MMMM Do YYYY')} />
<SummaryRow iconName="access-time" value={moment(data.time).format('LT')} />
<SummaryRow iconName="my-location" value={[data.fromStreet] + ", " + [data.fromCity]} />
<SummaryRow iconName="location-on" value={[data.toStreet] + ", " + [data.toCity]} />
<SummaryRow iconName="people" value={[data.numPassengers] + " " + [data.numPassengers > 1 ? 'Passengers' : 'Passenger']} />
{data.numWheelchairs > 0 &&
<SummaryRow iconName="accessible" value={[data.numWheelchairs] + " " + [data.numWheelchairs > 1 ? 'Wheelchairs' : 'Wheelchair']} />
}
</View>
</View>
</View>
</View>
{/* Inputs to amend ticket including date, time and wheelchairs */}
<View style={styles.inputs}>
<Text style={styles.body}>
If you wish to change details relating to the start/end locations or total
number of passengers, please cancel this ticket and re-book.
</Text>
{/* Date picker */}
<CustomDateTimePicker
placeholder="Date"
onPress={this._showDatePicker}
mode="date"
isVisible={this.state.isDatePickerVisible}
onConfirm={(value) => this._handleDatePicked(value)}
onCancel={this._hideDatePicker}
iconName="date-range"
format='Do MMM YY'
value={this.state.date}
/>
{/* Time picker */}
<CustomDateTimePicker
placeholder="Time"
onPress={this._showTimePicker}
mode="time"
isVisible={this.state.isTimePickerVisible}
onConfirm={(value) => this._handleTimePicked(value)}
onCancel={this._hideTimePicker}
iconName="access-time"
format='LT'
value={this.state.time}
/>
{/* Number of wheelchairs input */}
<CustomInput
focused={this.state.wheelchairFocused}
iconName="accessible"
placeholder={"No. of wheelchairs"}
value={this.state.numWheelchair ? this.state.numWheelchair.toString() : null}
onFocus={() => this.setState({ wheelchairFocused: true })}
onBlur={() => this.setState({ wheelchairFocused: false })}
onChangeText={(value) => this.setState({ numWheelchair: value })}
onRef={(ref) => (this.textInputWheelChair = ref)}
/>
{/* Submit amendments */}
<View style={styles.buttonContainer}>
<Button danger style={styles.button} onPress={this.onSubmit}>
<Text>Amend</Text>
</Button>
</View>
</View>
</View>
</Content>
</KeyboardAvoidingView>
</Container>
</StyleProvider>
);
}
}
const styles = StyleSheet.create({
content: {
flex: 1
},
contentContainer: {
flex: 1,
alignItems: 'center',
justifyContent: 'flex-end'
},
inputs: {
width: '80%',
flex: 1,
alignSelf: 'center',
marginTop: 10
},
header2: {
fontSize: 16,
color: colors.emphasisTextColor,
marginTop: 10,
marginBottom: 10
},
body: {
color: colors.bodyTextColor,
fontSize: 16
},
summaryCard: {
width: '100%',
flexDirection: 'column',
alignItems: 'center',
justifyContent: 'center',
shadowOffset: { width: 0, height: -20 },
shadowColor: 'black',
shadowOpacity: 1,
elevation: 5,
backgroundColor: colors.backgroundColor,
marginBottom: 15
},
cardContent: {
flex: 1,
flexDirection: 'row',
marginTop: 10,
width: '80%',
justifyContent: 'space-between'
},
details: {
width: '70%'
},
journeyInfo: {
flex: 1,
flexDirection: 'column',
alignItems: 'center',
width: '30%'
},
buttonContainer: {
flexDirection: 'row',
alignSelf: 'center',
marginTop: 15,
marginBottom: 15,
alignItems: 'center'
},
button: {
width: '100%',
justifyContent: 'center',
backgroundColor: colors.brandColor
}
});
const mapDispatchToProps = (dispatch) => {
return {
amendTicket: (data) => dispatch(amendTicket(data))
};
};
export default connect(null, mapDispatchToProps)(AmendTicket); | };
navigateTo = () => { |
build.go | // Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:generate go run gendex.go -o dex.go
package mobile
import (
"bufio"
"flag"
"fmt"
"go/build"
"io"
"os"
"os/exec"
"regexp"
"strings"
)
var ctx = build.Default
var pkg *build.Package // TODO(crawshaw): remove global pkg variable
var tmpdir string
var cmdBuild = &command{
run: runBuild,
Name: "build",
Usage: "[-target android|ios] [-o output] [-bundleid bundleID] [build flags] [package]",
Short: "compile android APK and iOS app",
Long: `
Build compiles and encodes the app named by the import path.
The named package must define a main function.
The -target flag takes a target system name, either android (the
default) or ios.
For -target android, if an AndroidManifest.xml is defined in the
package directory, it is added to the APK output. Otherwise, a default
manifest is generated. By default, this builds a fat APK for all supported
instruction sets (arm, 386, amd64, arm64). A subset of instruction sets can
be selected by specifying target type with the architecture name. E.g.
-target=android/arm,android/386.
For -target ios, gomobile must be run on an OS X machine with Xcode
installed.
If the package directory contains an assets subdirectory, its contents
are copied into the output.
Flag -iosversion sets the minimal version of the iOS SDK to compile against.
The default version is 7.0.
Flag -androidapi sets the Android API version to compile against.
The default and minimum is 15.
The -bundleid flag is required for -target ios and sets the bundle ID to use
with the app.
The -o flag specifies the output file name. If not specified, the
output file name depends on the package built.
The -v flag provides verbose output, including the list of packages built.
The build flags -a, -i, -n, -x, -gcflags, -ldflags, -tags, -trimpath, and -work are
shared with the build command. For documentation, see 'go help build'.
`,
}
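// Hedged CLI examples (assuming this fork keeps the upstream gomobile
// command shape; package paths are hypothetical):
//
//	gomobile build -target=android ./cmd/app
//	gomobile build -target=ios -bundleid com.example.app ./cmd/app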
const (
minAndroidAPI = 15
)
func runBuild(cmd *command) (err error) {
cleanup, err := buildEnvInit()
if err != nil {
return err
}
defer cleanup()
args := cmd.Flag.Args()
targetOS, targetArchs, err := parseBuildTarget(buildTarget)
if err != nil {
return fmt.Errorf(`invalid -target=%q: %v`, buildTarget, err)
}
oldCtx := ctx
defer func() {
ctx = oldCtx
}()
ctx.GOARCH = targetArchs[0]
ctx.GOOS = targetOS
if ctx.GOOS == "darwin" {
ctx.BuildTags = append(ctx.BuildTags, "ios")
}
switch len(args) {
case 0:
pkg, err = ctx.ImportDir(cwd, build.ImportComment)
case 1:
pkg, err = ctx.Import(args[0], cwd, build.ImportComment)
default:
cmd.usage()
os.Exit(1)
}
if err != nil {
return err
}
if pkg.Name != "main" && buildO != "" {
return fmt.Errorf("cannot set -o when building non-main package")
}
if buildBundleID == "" {
return fmt.Errorf("value for -appID is required for a mobile package")
}
var nmpkgs map[string]bool
switch targetOS {
case "android":
if pkg.Name != "main" {
for _, arch := range targetArchs {
env := androidEnv[arch]
if err := goBuild(pkg.ImportPath, env); err != nil {
return err
}
}
return nil
}
nmpkgs, err = goAndroidBuild(pkg, buildBundleID, targetArchs, cmd.IconPath, cmd.AppName)
if err != nil {
return err
}
case "darwin":
if !xcodeAvailable() {
return fmt.Errorf("-target=ios requires XCode")
}
if pkg.Name != "main" {
for _, arch := range targetArchs {
env := darwinEnv[arch]
if err := goBuild(pkg.ImportPath, env); err != nil {
return err
}
}
return nil
}
nmpkgs, err = goIOSBuild(pkg, buildBundleID, targetArchs, cmd.AppName)
if err != nil {
return err
}
}
if !nmpkgs["golang.org/x/mobile/app"] {
return fmt.Errorf(`%s does not import "golang.org/x/mobile/app"`, pkg.ImportPath)
}
return nil
}
var nmRE = regexp.MustCompile(`[0-9a-f]{8} t (?:.*/vendor/)?(golang.org/x.*/[^.]*)`)
func extractPkgs(nm string, path string) (map[string]bool, error) {
if buildN {
return map[string]bool{"golang.org/x/mobile/app": true}, nil
}
r, w := io.Pipe()
cmd := exec.Command(nm, path)
cmd.Stdout = w
cmd.Stderr = os.Stderr
nmpkgs := make(map[string]bool)
errc := make(chan error, 1)
go func() {
s := bufio.NewScanner(r)
for s.Scan() {
if res := nmRE.FindStringSubmatch(s.Text()); res != nil {
nmpkgs[res[1]] = true
}
}
errc <- s.Err()
}()
err := cmd.Run()
w.Close()
if err != nil {
return nil, fmt.Errorf("%s %s: %v", nm, path, err)
}
if err := <-errc; err != nil {
return nil, fmt.Errorf("%s %s: %v", nm, path, err)
}
return nmpkgs, nil
}
func importsApp(pkg *build.Package) error {
// Building a program, make sure it is appropriate for mobile.
for _, path := range pkg.Imports {
if path == "golang.org/x/mobile/app" {
return nil
}
}
return fmt.Errorf(`%s does not import "golang.org/x/mobile/app"`, pkg.ImportPath)
}
var xout io.Writer = os.Stderr
func printcmd(format string, args ...interface{}) {
cmd := fmt.Sprintf(format+"\n", args...)
if tmpdir != "" {
cmd = strings.Replace(cmd, tmpdir, "$WORK", -1)
}
if androidHome := os.Getenv("ANDROID_HOME"); androidHome != "" {
cmd = strings.Replace(cmd, androidHome, "$ANDROID_HOME", -1)
}
if gomobilepath != "" {
cmd = strings.Replace(cmd, gomobilepath, "$GOMOBILE", -1)
}
if gopath := goEnv("GOPATH"); gopath != "" {
cmd = strings.Replace(cmd, gopath, "$GOPATH", -1)
}
if env := os.Getenv("HOMEPATH"); env != "" {
cmd = strings.Replace(cmd, env, "$HOMEPATH", -1)
}
fmt.Fprint(xout, cmd)
}
// "Build flags", used by multiple commands.
var (
buildA bool // -a
buildI bool // -i
buildN bool // -n
buildV bool // -v
buildX bool // -x
buildO string // -o
buildGcflags string // -gcflags
buildLdflags string // -ldflags
buildRelease bool // -release
buildTarget string // -target
buildTrimpath bool // -trimpath
buildWork bool // -work
buildBundleID string // -bundleid
buildIOSVersion string // -iosversion
buildAndroidAPI int // -androidapi
)
func RunNewBuild(target, appID, icon, name string, release bool) error {
buildTarget = target
buildBundleID = appID
buildRelease = release
cmd := cmdBuild
cmd.Flag = flag.FlagSet{}
cmd.IconPath = icon
cmd.AppName = name
return runBuild(cmd)
}
func addBuildFlags(cmd *command) {
cmd.Flag.StringVar(&buildO, "o", "", "")
cmd.Flag.StringVar(&buildGcflags, "gcflags", "", "")
cmd.Flag.StringVar(&buildLdflags, "ldflags", "", "")
cmd.Flag.StringVar(&buildTarget, "target", "android", "")
cmd.Flag.StringVar(&buildBundleID, "bundleid", "", "")
cmd.Flag.StringVar(&buildIOSVersion, "iosversion", "7.0", "")
cmd.Flag.IntVar(&buildAndroidAPI, "androidapi", minAndroidAPI, "")
cmd.Flag.BoolVar(&buildA, "a", false, "")
cmd.Flag.BoolVar(&buildI, "i", false, "")
cmd.Flag.BoolVar(&buildTrimpath, "trimpath", false, "")
cmd.Flag.Var((*stringsFlag)(&ctx.BuildTags), "tags", "")
}
func addBuildFlagsNVXWork(cmd *command) {
cmd.Flag.BoolVar(&buildN, "n", false, "")
cmd.Flag.BoolVar(&buildV, "v", false, "")
cmd.Flag.BoolVar(&buildX, "x", false, "")
cmd.Flag.BoolVar(&buildWork, "work", false, "")
}
type binInfo struct {
hasPkgApp bool
hasPkgAL bool
}
func init() {
addBuildFlags(cmdBuild)
addBuildFlagsNVXWork(cmdBuild)
addBuildFlagsNVXWork(cmdClean)
}
func goBuild(src string, env []string, args ...string) error {
return goCmd("build", []string{src}, env, args...)
}
func goInstall(srcs []string, env []string, args ...string) error {
return goCmd("install", srcs, env, args...)
}
func | (subcmd string, srcs []string, env []string, args ...string) error {
cmd := exec.Command(
goBin(),
subcmd,
)
if len(ctx.BuildTags) > 0 {
cmd.Args = append(cmd.Args, "-tags", strings.Join(ctx.BuildTags, " "))
}
if buildV {
cmd.Args = append(cmd.Args, "-v")
}
if subcmd != "install" && buildI {
cmd.Args = append(cmd.Args, "-i")
}
if buildX {
cmd.Args = append(cmd.Args, "-x")
}
if buildGcflags != "" {
cmd.Args = append(cmd.Args, "-gcflags", buildGcflags)
}
if buildLdflags != "" {
cmd.Args = append(cmd.Args, "-ldflags", buildLdflags)
}
if buildTrimpath {
cmd.Args = append(cmd.Args, "-trimpath")
}
if buildWork {
cmd.Args = append(cmd.Args, "-work")
}
cmd.Args = append(cmd.Args, args...)
cmd.Args = append(cmd.Args, srcs...)
cmd.Env = append([]string{}, env...)
// gomobile does not support modules yet.
cmd.Env = append(cmd.Env, "GO111MODULE=off")
return runCmd(cmd)
}
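// For example (hypothetical values), goBuild("example.com/app", env, "-o", "app.apk")
// with buildV set would exec roughly:
//
//	go build -v -o app.apk example.com/app
//
// with GO111MODULE=off appended to the environment.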
func parseBuildTarget(buildTarget string) (os string, archs []string, _ error) {
if buildTarget == "" {
return "", nil, fmt.Errorf(`invalid target ""`)
}
all := false
archNames := []string{}
for i, p := range strings.Split(buildTarget, ",") {
osarch := strings.SplitN(p, "/", 2) // len(osarch) > 0
if osarch[0] != "android" && osarch[0] != "ios" {
return "", nil, fmt.Errorf(`unsupported os`)
}
if i == 0 {
os = osarch[0]
}
if os != osarch[0] {
return "", nil, fmt.Errorf(`cannot target different OSes`)
}
if len(osarch) == 1 {
all = true
} else {
archNames = append(archNames, osarch[1])
}
}
	// verify all archs are supported ones while deduping.
isSupported := func(arch string) bool {
for _, a := range allArchs {
if a == arch {
return true
}
}
return false
}
seen := map[string]bool{}
for _, arch := range archNames {
if _, ok := seen[arch]; ok {
continue
}
if !isSupported(arch) {
return "", nil, fmt.Errorf(`unsupported arch: %q`, arch)
}
seen[arch] = true
archs = append(archs, arch)
}
targetOS := os
if os == "ios" {
targetOS = "darwin"
}
if all {
return targetOS, allArchs, nil
}
return targetOS, archs, nil
}
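// Hedged examples of parseBuildTarget results, derived from the rules above:
//
//	parseBuildTarget("android")                 // "android", allArchs
//	parseBuildTarget("android/arm,android/386") // "android", ["arm", "386"]
//	parseBuildTarget("ios")                     // "darwin", allArchs (ios maps to darwin)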
| goCmd |
view.tsx | import * as React from 'react';
import { Login } from '../login';
import { Settings } from '../settings';
import './style.css';
export interface AppStateProps {
isAuthenticated: boolean;
isInitialised: boolean;
isInitialising: boolean;
view: string;
}
export interface AppDispatchProps {
initialise: () => void;
}
export type AppProps = AppStateProps & AppDispatchProps;
export const AppView = (props: AppProps) => {
if (!props.isInitialised && !props.isInitialising) {
props.initialise();
}
return (
<main className="main-view">
{props.isInitialised && renderView(props.view)}
{!props.isInitialised && <div className="loading-bar" data-role="activity" data-type="metro" data-style="dark"></div>}
</main>
);
};
function renderView(view: string) { | switch (view) {
case 'login': return <Login />;
case 'settings': return <Settings />;
}
return null;
} | |
context.js | import _ from 'lodash';
import { SearchSourceProvider } from 'ui/courier/data_source/search_source';
import { reverseSortDirective } from './utils/sorting';
function fetchContextProvider(courier, Private) {
const SearchSource = Private(SearchSourceProvider);
return {
fetchPredecessors,
fetchSuccessors,
};
async function | (indexPatternId, anchorDocument, contextSort, size, filters) {
const successorsSearchSource = await createSearchSource(
indexPatternId,
anchorDocument,
contextSort,
size,
filters,
);
const results = await performQuery(successorsSearchSource);
return results;
}
async function fetchPredecessors(indexPatternId, anchorDocument, contextSort, size, filters) {
const predecessorsSort = contextSort.map(reverseSortDirective);
const predecessorsSearchSource = await createSearchSource(
indexPatternId,
anchorDocument,
predecessorsSort,
size,
filters,
);
const reversedResults = await performQuery(predecessorsSearchSource);
const results = reversedResults.slice().reverse();
return results;
}
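  // Sketch of the double reversal above (sort values hypothetical): with
  // contextSort [{ '@timestamp': 'desc' }], predecessors are fetched with the
  // reversed sort [{ '@timestamp': 'asc' }] using searchAfter on the anchor,
  // then the hits are reversed again so callers always receive rows in
  // display order.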
async function createSearchSource(indexPatternId, anchorDocument, sort, size, filters) {
const indexPattern = await courier.indexPatterns.get(indexPatternId);
return new SearchSource()
.inherits(false)
.set('index', indexPattern)
.set('version', true)
.set('size', size)
.set('filter', filters)
.set('query', {
match_all: {},
})
.set('searchAfter', anchorDocument.sort)
.set('sort', sort);
}
async function performQuery(searchSource) {
const response = await searchSource.fetchAsRejectablePromise();
return _.get(response, ['hits', 'hits'], []);
}
}
export {
fetchContextProvider,
};
| fetchSuccessors |
send_notification.py | from flask import current_app
from notifications_utils.s3 import S3ObjectNotFound
from notifications_utils.s3 import s3download as utils_s3download
from sqlalchemy.orm.exc import NoResultFound
from app import create_random_identifier
from app.dao.notifications_dao import _update_notification_status
from app.dao.service_email_reply_to_dao import dao_get_reply_to_by_id
from app.dao.service_sms_sender_dao import dao_get_service_sms_senders_by_id
from app.dao.services_dao import dao_fetch_service_by_id
from app.dao.templates_dao import (
dao_get_template_by_id_and_service_id,
get_precompiled_letter_template,
)
from app.dao.users_dao import get_user_by_id
from app.letters.utils import (
get_letter_pdf_filename,
get_page_count,
move_uploaded_pdf_to_letters_bucket,
)
from app.models import (
EMAIL_TYPE,
KEY_TYPE_NORMAL,
LETTER_TYPE,
NOTIFICATION_DELIVERED,
SMS_TYPE,
UPLOAD_LETTERS,
)
from app.notifications.process_notifications import (
persist_notification,
send_notification_to_queue,
)
from app.notifications.validators import (
check_service_has_permission,
check_service_over_daily_message_limit,
validate_and_format_recipient,
validate_template,
)
from app.v2.errors import BadRequestError
def validate_created_by(service, created_by_id):
user = get_user_by_id(created_by_id)
if service not in user.services:
message = 'Can’t create notification - {} is not part of the "{}" service'.format(user.name, service.name)
raise BadRequestError(message=message)
def create_one_off_reference(template_type):
if template_type == LETTER_TYPE:
return create_random_identifier()
return None
def send_one_off_notification(service_id, post_data):
service = dao_fetch_service_by_id(service_id)
template = dao_get_template_by_id_and_service_id(template_id=post_data["template_id"], service_id=service_id)
personalisation = post_data.get("personalisation", None)
validate_template(template.id, personalisation, service, template.template_type)
check_service_over_daily_message_limit(KEY_TYPE_NORMAL, service)
validate_and_format_recipient(
send_to=post_data["to"],
key_type=KEY_TYPE_NORMAL,
service=service,
notification_type=template.template_type,
allow_safelisted_recipients=False,
)
validate_created_by(service, post_data["created_by"])
sender_id = post_data.get("sender_id", None)
reply_to = get_reply_to_text(
notification_type=template.template_type,
sender_id=sender_id,
service=service,
template=template,
)
notification = persist_notification(
template_id=template.id,
template_version=template.version,
template_postage=template.postage,
recipient=post_data["to"],
service=service,
personalisation=personalisation,
notification_type=template.template_type,
api_key_id=None,
key_type=KEY_TYPE_NORMAL,
created_by_id=post_data["created_by"],
reply_to_text=reply_to,
reference=create_one_off_reference(template.template_type),
)
if template.template_type == LETTER_TYPE and service.research_mode:
_update_notification_status(
notification,
NOTIFICATION_DELIVERED,
)
else:
send_notification_to_queue(
notification=notification,
research_mode=service.research_mode,
queue=template.queue_to_use(),
)
return {"id": str(notification.id)}
def get_reply_to_text(notification_type, sender_id, service, template):
reply_to = None
if sender_id:
try:
if notification_type == EMAIL_TYPE:
message = "Reply to email address not found"
reply_to = dao_get_reply_to_by_id(service.id, sender_id).email_address
elif notification_type == SMS_TYPE:
message = "SMS sender not found"
reply_to = dao_get_service_sms_senders_by_id(service.id, sender_id).get_reply_to_text()
except NoResultFound:
raise BadRequestError(message=message)
else:
re | return reply_to
def send_pdf_letter_notification(service_id, post_data):
service = dao_fetch_service_by_id(service_id)
check_service_has_permission(LETTER_TYPE, service.permissions)
check_service_has_permission(UPLOAD_LETTERS, service.permissions)
check_service_over_daily_message_limit(KEY_TYPE_NORMAL, service)
validate_created_by(service, post_data["created_by"])
template = get_precompiled_letter_template(service.id)
file_location = "service-{}/{}.pdf".format(service.id, post_data["file_id"])
try:
letter = utils_s3download(current_app.config["TRANSIENT_UPLOADED_LETTERS"], file_location)
except S3ObjectNotFound as e:
current_app.logger.exception(
"Letter {}.pdf not in transient {} bucket".format(
post_data["file_id"], current_app.config["TRANSIENT_UPLOADED_LETTERS"]
)
)
raise e
# Getting the page count won't raise an error since admin has already checked the PDF is valid
billable_units = get_page_count(letter.read())
personalisation = {"address_line_1": post_data["filename"]}
# TODO: stop hard-coding postage as 'second' once we get postage from the admin
notification = persist_notification(
notification_id=post_data["file_id"],
template_id=template.id,
template_version=template.version,
template_postage=template.postage,
recipient=post_data["filename"],
service=service,
personalisation=personalisation,
notification_type=LETTER_TYPE,
api_key_id=None,
key_type=KEY_TYPE_NORMAL,
reference=create_one_off_reference(LETTER_TYPE),
client_reference=post_data["filename"],
created_by_id=post_data["created_by"],
billable_units=billable_units,
postage="second",
)
upload_filename = get_letter_pdf_filename(
notification.reference,
notification.service.crown,
is_scan_letter=False,
postage=notification.postage,
)
move_uploaded_pdf_to_letters_bucket(file_location, upload_filename)
return {"id": str(notification.id)}
| ply_to = template.get_reply_to_text()
|
notify.go | // Copyright (c) 2014-2017 The ifishnet developers
// Copyright (c) 2015-2017 The Decred developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package rpcclient
import (
"bytes"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"time"
"github.com/ifishnet/hdfd/hdfjson"
"github.com/ifishnet/hdfd/chaincfg/chainhash"
"github.com/ifishnet/hdfd/wire"
"github.com/ifishnet/hdfutil"
)
var (
// ErrWebsocketsRequired is an error to describe the condition where the
// caller is trying to use a websocket-only feature, such as requesting
// notifications or other websocket requests when the client is
// configured to run in HTTP POST mode.
ErrWebsocketsRequired = errors.New("a websocket connection is required " +
"to use this feature")
)
// notificationState is used to track the current state of successfully
// registered notifications so the state can be automatically re-established on
// reconnect.
type notificationState struct {
notifyBlocks bool
notifyNewTx bool
notifyNewTxVerbose bool
notifyReceived map[string]struct{}
notifySpent map[hdfjson.OutPoint]struct{}
}
// Copy returns a deep copy of the receiver.
func (s *notificationState) Copy() *notificationState {
var stateCopy notificationState
stateCopy.notifyBlocks = s.notifyBlocks
stateCopy.notifyNewTx = s.notifyNewTx
stateCopy.notifyNewTxVerbose = s.notifyNewTxVerbose
stateCopy.notifyReceived = make(map[string]struct{})
for addr := range s.notifyReceived {
stateCopy.notifyReceived[addr] = struct{}{}
}
stateCopy.notifySpent = make(map[hdfjson.OutPoint]struct{})
for op := range s.notifySpent {
stateCopy.notifySpent[op] = struct{}{}
}
return &stateCopy
}
// newNotificationState returns a new notification state ready to be populated.
func newNotificationState() *notificationState {
return &notificationState{
notifyReceived: make(map[string]struct{}),
notifySpent: make(map[hdfjson.OutPoint]struct{}),
}
}
// newNilFutureResult returns a new future result channel that already has the
// result waiting on the channel with the reply set to nil. This is useful
// to ignore things such as notifications when the caller didn't specify any
// notification handlers.
func newNilFutureResult() chan *response {
responseChan := make(chan *response, 1)
responseChan <- &response{result: nil, err: nil}
return responseChan
}
// NotificationHandlers defines callback function pointers to invoke with
// notifications. Since all of the functions are nil by default, all
// notifications are effectively ignored until their handlers are set to a
// concrete callback.
//
// NOTE: Unless otherwise documented, these handlers must NOT directly call any
// blocking calls on the client instance since the input reader goroutine blocks
// until the callback has completed. Doing so will result in a deadlock
// situation.
type NotificationHandlers struct {
// OnClientConnected is invoked when the client connects or reconnects
// to the RPC server. This callback is run async with the rest of the
// notification handlers, and is safe for blocking client requests.
OnClientConnected func()
// OnBlockConnected is invoked when a block is connected to the longest
// (best) chain. It will only be invoked if a preceding call to
// NotifyBlocks has been made to register for the notification and the
// function is non-nil.
//
// Deprecated: Use OnFilteredBlockConnected instead.
OnBlockConnected func(hash *chainhash.Hash, height int32, t time.Time)
// OnFilteredBlockConnected is invoked when a block is connected to the
// longest (best) chain. It will only be invoked if a preceding call to
// NotifyBlocks has been made to register for the notification and the
// function is non-nil. Its parameters differ from OnBlockConnected: it
// receives the block's height, header, and relevant transactions.
OnFilteredBlockConnected func(height int32, header *wire.BlockHeader,
txs []*hdfutil.Tx)
// OnBlockDisconnected is invoked when a block is disconnected from the
// longest (best) chain. It will only be invoked if a preceding call to
// NotifyBlocks has been made to register for the notification and the
// function is non-nil.
//
// Deprecated: Use OnFilteredBlockDisconnected instead.
OnBlockDisconnected func(hash *chainhash.Hash, height int32, t time.Time)
// OnFilteredBlockDisconnected is invoked when a block is disconnected
// from the longest (best) chain. It will only be invoked if a
// preceding call to NotifyBlocks has been made to register for the
// notification and the function is non-nil. Its parameters differ from
// OnBlockDisconnected: it receives the block's height and header.
OnFilteredBlockDisconnected func(height int32, header *wire.BlockHeader)
// OnRecvTx is invoked when a transaction that receives funds to a
// registered address is received into the memory pool and also
// connected to the longest (best) chain. It will only be invoked if a
// preceding call to NotifyReceived, Rescan, or RescanEndHeight has been
// made to register for the notification and the function is non-nil.
//
// Deprecated: Use OnRelevantTxAccepted instead.
OnRecvTx func(transaction *hdfutil.Tx, details *hdfjson.BlockDetails)
// OnRedeemingTx is invoked when a transaction that spends a registered
// outpoint is received into the memory pool and also connected to the
// longest (best) chain. It will only be invoked if a preceding call to
// NotifySpent, Rescan, or RescanEndHeight has been made to register for
// the notification and the function is non-nil.
//
// NOTE: NotifyReceived will automatically register notifications
// for the outpoints that are now "owned" as a result of receiving
// funds to the registered addresses. This means it is possible for
// this to be invoked indirectly as the result of a NotifyReceived call.
//
// Deprecated: Use OnRelevantTxAccepted instead.
OnRedeemingTx func(transaction *hdfutil.Tx, details *hdfjson.BlockDetails)
// OnRelevantTxAccepted is invoked when an unmined transaction passes
// the client's transaction filter.
//
// NOTE: This is a ifishnet extension ported from
// github.com/decred/dcrrpcclient.
OnRelevantTxAccepted func(transaction []byte)
// OnRescanFinished is invoked after a rescan finishes due to a previous
// call to Rescan or RescanEndHeight. Finished rescans should be
// signaled on this notification, rather than relying on the return
// result of a rescan request, due to how hdfd may send various rescan
// notifications after the rescan request has already returned.
//
// Deprecated: Not used with RescanBlocks.
OnRescanFinished func(hash *chainhash.Hash, height int32, blkTime time.Time)
// OnRescanProgress is invoked periodically when a rescan is underway.
// It will only be invoked if a preceding call to Rescan or
// RescanEndHeight has been made and the function is non-nil.
//
// Deprecated: Not used with RescanBlocks.
OnRescanProgress func(hash *chainhash.Hash, height int32, blkTime time.Time)
// OnTxAccepted is invoked when a transaction is accepted into the
// memory pool. It will only be invoked if a preceding call to
// NotifyNewTransactions with the verbose flag set to false has been
// made to register for the notification and the function is non-nil.
OnTxAccepted func(hash *chainhash.Hash, amount hdfutil.Amount)
// OnTxAcceptedVerbose is invoked when a transaction is accepted into the
// memory pool. It will only be invoked if a preceding call to
// NotifyNewTransactions with the verbose flag set to true has been
// made to register for the notification and the function is non-nil.
OnTxAcceptedVerbose func(txDetails *hdfjson.TxRawResult)
// OnHdfdConnected is invoked when a wallet connects or disconnects from
// hdfd.
//
// This will only be available when client is connected to a wallet
// server such as hdfwallet.
OnHdfdConnected func(connected bool)
// OnAccountBalance is invoked with account balance updates.
//
// This will only be available when speaking to a wallet server
// such as hdfwallet.
OnAccountBalance func(account string, balance hdfutil.Amount, confirmed bool)
// OnWalletLockState is invoked when a wallet is locked or unlocked.
//
// This will only be available when client is connected to a wallet
// server such as hdfwallet.
OnWalletLockState func(locked bool)
// OnUnknownNotification is invoked when an unrecognized notification
// is received. This typically means the notification handling code
// for this package needs to be updated for a new notification type or
// the caller is using a custom notification this package does not know
// about.
OnUnknownNotification func(method string, params []json.RawMessage)
}
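// Example (illustrative sketch only; the handler body and the use of fmt are
// assumptions, not part of this package's API):
//
//	ntfnHandlers := NotificationHandlers{
//		OnBlockConnected: func(hash *chainhash.Hash, height int32, t time.Time) {
//			// Hand off to another goroutine: blocking client calls made
//			// directly here would deadlock the input reader (see NOTE above).
//			go fmt.Printf("block %v at height %d\n", hash, height)
//		},
//	}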
// handleNotification examines the passed notification type, performs
// conversions to get the raw notification types into higher level types and
// delivers the notification to the appropriate On<X> handler registered with
// the client.
func (c *Client) handleNotification(ntfn *rawNotification) {
// Ignore the notification if the client is not interested in any
// notifications.
if c.ntfnHandlers == nil {
return
}
switch ntfn.Method {
// OnBlockConnected
case hdfjson.BlockConnectedNtfnMethod:
// Ignore the notification if the client is not interested in
// it.
if c.ntfnHandlers.OnBlockConnected == nil {
return
}
blockHash, blockHeight, blockTime, err := parseChainNtfnParams(ntfn.Params)
if err != nil {
log.Warnf("Received invalid block connected "+
"notification: %v", err)
return
}
c.ntfnHandlers.OnBlockConnected(blockHash, blockHeight, blockTime)
// OnFilteredBlockConnected
case hdfjson.FilteredBlockConnectedNtfnMethod:
// Ignore the notification if the client is not interested in
// it.
if c.ntfnHandlers.OnFilteredBlockConnected == nil {
return
}
blockHeight, blockHeader, transactions, err :=
parseFilteredBlockConnectedParams(ntfn.Params)
if err != nil {
log.Warnf("Received invalid filtered block "+
"connected notification: %v", err)
return
}
c.ntfnHandlers.OnFilteredBlockConnected(blockHeight,
blockHeader, transactions)
// OnBlockDisconnected
case hdfjson.BlockDisconnectedNtfnMethod:
// Ignore the notification if the client is not interested in
// it.
if c.ntfnHandlers.OnBlockDisconnected == nil {
return
}
blockHash, blockHeight, blockTime, err := parseChainNtfnParams(ntfn.Params)
if err != nil {
log.Warnf("Received invalid block disconnected "+
"notification: %v", err)
return
}
c.ntfnHandlers.OnBlockDisconnected(blockHash, blockHeight, blockTime)
// OnFilteredBlockDisconnected
case hdfjson.FilteredBlockDisconnectedNtfnMethod:
// Ignore the notification if the client is not interested in
// it.
if c.ntfnHandlers.OnFilteredBlockDisconnected == nil {
return
}
blockHeight, blockHeader, err :=
parseFilteredBlockDisconnectedParams(ntfn.Params)
if err != nil {
log.Warnf("Received invalid filtered block "+
"disconnected notification: %v", err)
return
}
c.ntfnHandlers.OnFilteredBlockDisconnected(blockHeight,
blockHeader)
// OnRecvTx
case hdfjson.RecvTxNtfnMethod:
// Ignore the notification if the client is not interested in
// it.
if c.ntfnHandlers.OnRecvTx == nil {
return
}
tx, block, err := parseChainTxNtfnParams(ntfn.Params)
if err != nil {
log.Warnf("Received invalid recvtx notification: %v",
err)
return
}
c.ntfnHandlers.OnRecvTx(tx, block)
// OnRedeemingTx
case hdfjson.RedeemingTxNtfnMethod:
// Ignore the notification if the client is not interested in
// it.
if c.ntfnHandlers.OnRedeemingTx == nil {
return
}
tx, block, err := parseChainTxNtfnParams(ntfn.Params)
if err != nil {
log.Warnf("Received invalid redeemingtx "+
"notification: %v", err)
return
}
c.ntfnHandlers.OnRedeemingTx(tx, block)
// OnRelevantTxAccepted
case hdfjson.RelevantTxAcceptedNtfnMethod:
// Ignore the notification if the client is not interested in
// it.
if c.ntfnHandlers.OnRelevantTxAccepted == nil {
return
}
transaction, err := parseRelevantTxAcceptedParams(ntfn.Params)
if err != nil {
log.Warnf("Received invalid relevanttxaccepted "+
"notification: %v", err)
return
}
c.ntfnHandlers.OnRelevantTxAccepted(transaction)
// OnRescanFinished
case hdfjson.RescanFinishedNtfnMethod:
// Ignore the notification if the client is not interested in
// it.
if c.ntfnHandlers.OnRescanFinished == nil {
return
}
hash, height, blkTime, err := parseRescanProgressParams(ntfn.Params)
if err != nil {
log.Warnf("Received invalid rescanfinished "+
"notification: %v", err)
return
}
c.ntfnHandlers.OnRescanFinished(hash, height, blkTime)
// OnRescanProgress
case hdfjson.RescanProgressNtfnMethod:
// Ignore the notification if the client is not interested in
// it.
if c.ntfnHandlers.OnRescanProgress == nil {
return
}
hash, height, blkTime, err := parseRescanProgressParams(ntfn.Params)
if err != nil {
log.Warnf("Received invalid rescanprogress "+
"notification: %v", err)
return
}
c.ntfnHandlers.OnRescanProgress(hash, height, blkTime)
// OnTxAccepted
case hdfjson.TxAcceptedNtfnMethod:
// Ignore the notification if the client is not interested in
// it.
if c.ntfnHandlers.OnTxAccepted == nil {
return
}
hash, amt, err := parseTxAcceptedNtfnParams(ntfn.Params)
if err != nil {
log.Warnf("Received invalid tx accepted "+
"notification: %v", err)
return
}
c.ntfnHandlers.OnTxAccepted(hash, amt)
// OnTxAcceptedVerbose
case hdfjson.TxAcceptedVerboseNtfnMethod:
// Ignore the notification if the client is not interested in
// it.
if c.ntfnHandlers.OnTxAcceptedVerbose == nil {
return
}
rawTx, err := parseTxAcceptedVerboseNtfnParams(ntfn.Params)
if err != nil {
log.Warnf("Received invalid tx accepted verbose "+
"notification: %v", err)
return
}
c.ntfnHandlers.OnTxAcceptedVerbose(rawTx)
// OnHdfdConnected
case hdfjson.HdfdConnectedNtfnMethod:
// Ignore the notification if the client is not interested in
// it.
if c.ntfnHandlers.OnHdfdConnected == nil {
return
}
connected, err := parseHdfdConnectedNtfnParams(ntfn.Params)
if err != nil {
log.Warnf("Received invalid hdfd connected "+
"notification: %v", err)
return
}
c.ntfnHandlers.OnHdfdConnected(connected)
// OnAccountBalance
case hdfjson.AccountBalanceNtfnMethod:
// Ignore the notification if the client is not interested in
// it.
if c.ntfnHandlers.OnAccountBalance == nil {
return
}
account, bal, conf, err := parseAccountBalanceNtfnParams(ntfn.Params)
if err != nil {
log.Warnf("Received invalid account balance "+
"notification: %v", err)
return
}
c.ntfnHandlers.OnAccountBalance(account, bal, conf)
// OnWalletLockState
case hdfjson.WalletLockStateNtfnMethod:
// Ignore the notification if the client is not interested in
// it.
if c.ntfnHandlers.OnWalletLockState == nil {
return
}
// The account name is not notified, so the return value is
// discarded.
_, locked, err := parseWalletLockStateNtfnParams(ntfn.Params)
if err != nil {
log.Warnf("Received invalid wallet lock state "+
"notification: %v", err)
return
}
c.ntfnHandlers.OnWalletLockState(locked)
// OnUnknownNotification
default:
if c.ntfnHandlers.OnUnknownNotification == nil {
return
}
c.ntfnHandlers.OnUnknownNotification(ntfn.Method, ntfn.Params)
}
}
// wrongNumParams is an error type describing an unparseable JSON-RPC
// notification due to an incorrect number of parameters for the
// expected notification type. The value is the number of parameters
// of the invalid notification.
type wrongNumParams int
// Error satisfies the builtin error interface.
func (e wrongNumParams) Error() string {
return fmt.Sprintf("wrong number of parameters (%d)", e)
}
// parseChainNtfnParams parses out the block hash, height, and time from the
// parameters of blockconnected and blockdisconnected notifications.
func parseChainNtfnParams(params []json.RawMessage) (*chainhash.Hash,
int32, time.Time, error) {
if len(params) != 3 {
return nil, 0, time.Time{}, wrongNumParams(len(params))
}
// Unmarshal first parameter as a string.
var blockHashStr string
err := json.Unmarshal(params[0], &blockHashStr)
if err != nil {
return nil, 0, time.Time{}, err
}
// Unmarshal second parameter as an integer.
var blockHeight int32
err = json.Unmarshal(params[1], &blockHeight)
if err != nil {
return nil, 0, time.Time{}, err
}
// Unmarshal third parameter as unix time.
var blockTimeUnix int64
err = json.Unmarshal(params[2], &blockTimeUnix)
if err != nil {
return nil, 0, time.Time{}, err
}
// Create hash from block hash string.
blockHash, err := chainhash.NewHashFromStr(blockHashStr)
if err != nil {
return nil, 0, time.Time{}, err
}
// Create time.Time from unix time.
blockTime := time.Unix(blockTimeUnix, 0)
return blockHash, blockHeight, blockTime, nil
}
// parseFilteredBlockConnectedParams parses out the parameters included in a
// filteredblockconnected notification.
//
// NOTE: This is a hdfd extension ported from github.com/decred/dcrrpcclient
// and requires a websocket connection.
func parseFilteredBlockConnectedParams(params []json.RawMessage) (int32,
*wire.BlockHeader, []*hdfutil.Tx, error) {
if len(params) < 3 {
return 0, nil, nil, wrongNumParams(len(params))
}
// Unmarshal first parameter as an integer.
var blockHeight int32
err := json.Unmarshal(params[0], &blockHeight)
if err != nil {
return 0, nil, nil, err
}
// Unmarshal second parameter as a slice of bytes.
blockHeaderBytes, err := parseHexParam(params[1])
if err != nil {
return 0, nil, nil, err
}
// Deserialize block header from slice of bytes.
var blockHeader wire.BlockHeader
err = blockHeader.Deserialize(bytes.NewReader(blockHeaderBytes))
if err != nil {
return 0, nil, nil, err
}
// Unmarshal third parameter as a slice of hex-encoded strings.
var hexTransactions []string
err = json.Unmarshal(params[2], &hexTransactions)
if err != nil {
return 0, nil, nil, err
}
// Create slice of transactions from slice of strings by hex-decoding.
transactions := make([]*hdfutil.Tx, len(hexTransactions))
for i, hexTx := range hexTransactions {
transaction, err := hex.DecodeString(hexTx)
if err != nil {
return 0, nil, nil, err
}
transactions[i], err = hdfutil.NewTxFromBytes(transaction)
if err != nil {
return 0, nil, nil, err
}
}
return blockHeight, &blockHeader, transactions, nil
}
// parseFilteredBlockDisconnectedParams parses out the parameters included in a
// filteredblockdisconnected notification.
//
// NOTE: This is a hdfd extension ported from github.com/decred/dcrrpcclient
// and requires a websocket connection.
func parseFilteredBlockDisconnectedParams(params []json.RawMessage) (int32,
*wire.BlockHeader, error) {
if len(params) < 2 {
return 0, nil, wrongNumParams(len(params))
}
// Unmarshal first parameter as an integer.
var blockHeight int32
err := json.Unmarshal(params[0], &blockHeight)
if err != nil {
return 0, nil, err
}
// Unmarshal second parameter as a slice of bytes.
blockHeaderBytes, err := parseHexParam(params[1])
if err != nil {
return 0, nil, err
}
// Deserialize block header from slice of bytes.
var blockHeader wire.BlockHeader
err = blockHeader.Deserialize(bytes.NewReader(blockHeaderBytes))
if err != nil {
return 0, nil, err
}
return blockHeight, &blockHeader, nil
}
func parseHexParam(param json.RawMessage) ([]byte, error) {
var s string
err := json.Unmarshal(param, &s)
if err != nil {
return nil, err
}
return hex.DecodeString(s)
}
// parseRelevantTxAcceptedParams parses out the parameter included in a
// relevanttxaccepted notification.
func parseRelevantTxAcceptedParams(params []json.RawMessage) (transaction []byte, err error) {
if len(params) < 1 {
return nil, wrongNumParams(len(params))
}
return parseHexParam(params[0])
}
// parseChainTxNtfnParams parses out the transaction and optional details about
// the block it's mined in from the parameters of recvtx and redeemingtx
// notifications.
func parseChainTxNtfnParams(params []json.RawMessage) (*hdfutil.Tx,
*hdfjson.BlockDetails, error) {
if len(params) == 0 || len(params) > 2 {
return nil, nil, wrongNumParams(len(params))
}
// Unmarshal first parameter as a string.
var txHex string
err := json.Unmarshal(params[0], &txHex)
if err != nil {
return nil, nil, err
}
// If present, unmarshal second optional parameter as the block details
// JSON object.
var block *hdfjson.BlockDetails
if len(params) > 1 {
err = json.Unmarshal(params[1], &block)
if err != nil {
return nil, nil, err
}
}
// Hex decode and deserialize the transaction.
serializedTx, err := hex.DecodeString(txHex)
if err != nil {
return nil, nil, err
}
var msgTx wire.MsgTx
err = msgTx.Deserialize(bytes.NewReader(serializedTx))
if err != nil {
return nil, nil, err
}
// TODO: Change recvtx and redeemingtx callback signatures to use
// nicer types for details about the block (block hash as a
// chainhash.Hash, block time as a time.Time, etc.).
return hdfutil.NewTx(&msgTx), block, nil
}
// parseRescanProgressParams parses out the hash, height, and time of the last
// rescanned block from the parameters of rescanfinished and rescanprogress
// notifications.
func parseRescanProgressParams(params []json.RawMessage) (*chainhash.Hash, int32, time.Time, error) {
if len(params) != 3 {
return nil, 0, time.Time{}, wrongNumParams(len(params))
}
// Unmarshal first parameter as a string.
var hashStr string
err := json.Unmarshal(params[0], &hashStr)
if err != nil {
return nil, 0, time.Time{}, err
}
// Unmarshal second parameter as an integer.
var height int32
err = json.Unmarshal(params[1], &height)
if err != nil {
return nil, 0, time.Time{}, err
}
// Unmarshal third parameter as unix time.
var blkTime int64
err = json.Unmarshal(params[2], &blkTime)
if err != nil {
return nil, 0, time.Time{}, err
}
// Decode string encoding of block hash.
hash, err := chainhash.NewHashFromStr(hashStr)
if err != nil {
return nil, 0, time.Time{}, err
}
return hash, height, time.Unix(blkTime, 0), nil
}
// parseTxAcceptedNtfnParams parses out the transaction hash and total amount
// from the parameters of a txaccepted notification.
func parseTxAcceptedNtfnParams(params []json.RawMessage) (*chainhash.Hash,
hdfutil.Amount, error) {
if len(params) != 2 {
return nil, 0, wrongNumParams(len(params))
}
// Unmarshal first parameter as a string.
var txHashStr string
err := json.Unmarshal(params[0], &txHashStr)
if err != nil {
return nil, 0, err
}
// Unmarshal second parameter as a floating point number.
var famt float64
err = json.Unmarshal(params[1], &famt)
if err != nil {
return nil, 0, err
}
// Bounds check amount.
amt, err := hdfutil.NewAmount(famt)
if err != nil {
return nil, 0, err
}
// Decode string encoding of transaction sha.
txHash, err := chainhash.NewHashFromStr(txHashStr)
if err != nil {
return nil, 0, err
}
return txHash, amt, nil
}
// parseTxAcceptedVerboseNtfnParams parses out details about a raw transaction
// from the parameters of a txacceptedverbose notification.
func parseTxAcceptedVerboseNtfnParams(params []json.RawMessage) (*hdfjson.TxRawResult,
error) {
if len(params) != 1 {
return nil, wrongNumParams(len(params))
}
// Unmarshal first parameter as a raw transaction result object.
var rawTx hdfjson.TxRawResult
err := json.Unmarshal(params[0], &rawTx)
if err != nil {
return nil, err
}
// TODO: change txacceptedverbose notification callbacks to use nicer
// types for all details about the transaction (i.e. decoding hashes
// from their string encoding).
return &rawTx, nil
}
// parseHdfdConnectedNtfnParams parses out the connection status of hdfd
// and hdfwallet from the parameters of a hdfdconnected notification.
func parseHdfdConnectedNtfnParams(params []json.RawMessage) (bool, error) {
if len(params) != 1 {
return false, wrongNumParams(len(params))
}
// Unmarshal first parameter as a boolean.
var connected bool
err := json.Unmarshal(params[0], &connected)
if err != nil {
return false, err
}
return connected, nil
}
// parseAccountBalanceNtfnParams parses out the account name, total balance,
// and whether or not the balance is confirmed or unconfirmed from the
// parameters of an accountbalance notification.
func parseAccountBalanceNtfnParams(params []json.RawMessage) (account string,
balance hdfutil.Amount, confirmed bool, err error) {
if len(params) != 3 {
return "", 0, false, wrongNumParams(len(params))
}
// Unmarshal first parameter as a string.
err = json.Unmarshal(params[0], &account)
if err != nil {
return "", 0, false, err
}
// Unmarshal second parameter as a floating point number.
var fbal float64
err = json.Unmarshal(params[1], &fbal)
if err != nil {
return "", 0, false, err
}
// Unmarshal third parameter as a boolean.
err = json.Unmarshal(params[2], &confirmed)
if err != nil {
return "", 0, false, err
}
// Bounds check amount.
bal, err := hdfutil.NewAmount(fbal)
if err != nil {
return "", 0, false, err
}
return account, bal, confirmed, nil
}
// parseWalletLockStateNtfnParams parses out the account name and locked
// state of an account from the parameters of a walletlockstate notification.
func parseWalletLockStateNtfnParams(params []json.RawMessage) (account string,
locked bool, err error) {
if len(params) != 2 {
return "", false, wrongNumParams(len(params))
}
// Unmarshal first parameter as a string.
err = json.Unmarshal(params[0], &account)
if err != nil {
return "", false, err
}
// Unmarshal second parameter as a boolean.
err = json.Unmarshal(params[1], &locked)
if err != nil {
return "", false, err
}
return account, locked, nil
}
// FutureNotifyBlocksResult is a future promise to deliver the result of a
// NotifyBlocksAsync RPC invocation (or an applicable error).
type FutureNotifyBlocksResult chan *response
// Receive waits for the response promised by the future and returns an error
// if the registration was not successful.
func (r FutureNotifyBlocksResult) Receive() error {
_, err := receiveFuture(r)
return err
}
// NotifyBlocksAsync returns an instance of a type that can be used to get the
// result of the RPC at some future time by invoking the Receive function on
// the returned instance.
//
// See NotifyBlocks for the blocking version and more details.
//
// NOTE: This is a hdfd extension and requires a websocket connection.
func (c *Client) NotifyBlocksAsync() FutureNotifyBlocksResult {
// Not supported in HTTP POST mode.
if c.config.HTTPPostMode {
return newFutureError(ErrWebsocketsRequired)
}
// Ignore the notification if the client is not interested in
// notifications.
if c.ntfnHandlers == nil {
return newNilFutureResult()
}
cmd := hdfjson.NewNotifyBlocksCmd()
return c.sendCmd(cmd)
}
// NotifyBlocks registers the client to receive notifications when blocks are
// connected and disconnected from the main chain. The notifications are
// delivered to the notification handlers associated with the client. Calling
// this function has no effect if there are no notification handlers and will
// result in an error if the client is configured to run in HTTP POST mode.
//
// The notifications delivered as a result of this call will be via one of
// OnBlockConnected or OnBlockDisconnected.
//
// NOTE: This is a hdfd extension and requires a websocket connection.
func (c *Client) NotifyBlocks() error {
return c.NotifyBlocksAsync().Receive()
}
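// Example (illustrative; assumes a connected websocket client configured
// with notification handlers):
//
//	if err := client.NotifyBlocks(); err != nil {
//		log.Warnf("failed to register for block notifications: %v", err)
//	}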
// FutureNotifySpentResult is a future promise to deliver the result of a
// NotifySpentAsync RPC invocation (or an applicable error).
//
// Deprecated: Use FutureLoadTxFilterResult instead.
type FutureNotifySpentResult chan *response
// Receive waits for the response promised by the future and returns an error
// if the registration was not successful.
func (r FutureNotifySpentResult) Receive() error {
_, err := receiveFuture(r)
return err
}
// notifySpentInternal is the same as NotifySpentAsync except it accepts
// the converted outpoints as a parameter so the client can more efficiently
// recreate the previous notification state on reconnect.
func (c *Client) notifySpentInternal(outpoints []hdfjson.OutPoint) FutureNotifySpentResult {
// Not supported in HTTP POST mode.
if c.config.HTTPPostMode {
return newFutureError(ErrWebsocketsRequired)
}
// Ignore the notification if the client is not interested in
// notifications.
if c.ntfnHandlers == nil {
return newNilFutureResult()
}
cmd := hdfjson.NewNotifySpentCmd(outpoints)
return c.sendCmd(cmd)
}
// newOutPointFromWire constructs the hdfjson representation of a transaction
// outpoint from the wire type.
func newOutPointFromWire(op *wire.OutPoint) hdfjson.OutPoint {
return hdfjson.OutPoint{
Hash: op.Hash.String(),
Index: op.Index,
}
}
// NotifySpentAsync returns an instance of a type that can be used to get the
// result of the RPC at some future time by invoking the Receive function on
// the returned instance.
//
// See NotifySpent for the blocking version and more details.
//
// NOTE: This is a hdfd extension and requires a websocket connection.
//
// Deprecated: Use LoadTxFilterAsync instead.
func (c *Client) NotifySpentAsync(outpoints []*wire.OutPoint) FutureNotifySpentResult {
// Not supported in HTTP POST mode.
if c.config.HTTPPostMode {
return newFutureError(ErrWebsocketsRequired)
}
// Ignore the notification if the client is not interested in
// notifications.
if c.ntfnHandlers == nil {
return newNilFutureResult()
}
ops := make([]hdfjson.OutPoint, 0, len(outpoints))
for _, outpoint := range outpoints {
ops = append(ops, newOutPointFromWire(outpoint))
}
cmd := hdfjson.NewNotifySpentCmd(ops)
return c.sendCmd(cmd)
}
// NotifySpent registers the client to receive notifications when the passed
// transaction outputs are spent. The notifications are delivered to the
// notification handlers associated with the client. Calling this function has
// no effect if there are no notification handlers and will result in an error
// if the client is configured to run in HTTP POST mode.
//
// The notifications delivered as a result of this call will be via
// OnRedeemingTx.
//
// NOTE: This is a hdfd extension and requires a websocket connection.
//
// Deprecated: Use LoadTxFilter instead.
func (c *Client) NotifySpent(outpoints []*wire.OutPoint) error {
return c.NotifySpentAsync(outpoints).Receive()
}
// FutureNotifyNewTransactionsResult is a future promise to deliver the result
// of a NotifyNewTransactionsAsync RPC invocation (or an applicable error).
type FutureNotifyNewTransactionsResult chan *response
// Receive waits for the response promised by the future and returns an error
// if the registration was not successful.
func (r FutureNotifyNewTransactionsResult) Receive() error {
_, err := receiveFuture(r)
return err
}
// NotifyNewTransactionsAsync returns an instance of a type that can be used to
// get the result of the RPC at some future time by invoking the Receive
// function on the returned instance.
//
// See NotifyNewTransactions for the blocking version and more details.
//
// NOTE: This is a hdfd extension and requires a websocket connection.
func (c *Client) NotifyNewTransactionsAsync(verbose bool) FutureNotifyNewTransactionsResult {
// Not supported in HTTP POST mode.
if c.config.HTTPPostMode {
return newFutureError(ErrWebsocketsRequired)
}
// Ignore the notification if the client is not interested in
// notifications.
if c.ntfnHandlers == nil {
return newNilFutureResult()
}
cmd := hdfjson.NewNotifyNewTransactionsCmd(&verbose)
return c.sendCmd(cmd)
}
// NotifyNewTransactions registers the client to receive notifications every
// time a new transaction is accepted to the memory pool. The notifications are
// delivered to the notification handlers associated with the client. Calling
// this function has no effect if there are no notification handlers and will
// result in an error if the client is configured to run in HTTP POST mode.
//
// The notifications delivered as a result of this call will be via one of
// OnTxAccepted (when verbose is false) or OnTxAcceptedVerbose (when verbose is
// true).
//
// NOTE: This is a hdfd extension and requires a websocket connection.
func (c *Client) NotifyNewTransactions(verbose bool) error {
return c.NotifyNewTransactionsAsync(verbose).Receive()
}
// FutureNotifyReceivedResult is a future promise to deliver the result of a
// NotifyReceivedAsync RPC invocation (or an applicable error).
//
// Deprecated: Use FutureLoadTxFilterResult instead.
type FutureNotifyReceivedResult chan *response
// Receive waits for the response promised by the future and returns an error
// if the registration was not successful.
func (r FutureNotifyReceivedResult) Receive() error {
_, err := receiveFuture(r)
return err
}
// notifyReceivedInternal is the same as NotifyReceivedAsync except it accepts
// the converted addresses as a parameter so the client can more efficiently
// recreate the previous notification state on reconnect.
func (c *Client) notifyReceivedInternal(addresses []string) FutureNotifyReceivedResult {
// Not supported in HTTP POST mode.
if c.config.HTTPPostMode {
return newFutureError(ErrWebsocketsRequired)
}
// Ignore the notification if the client is not interested in
// notifications.
if c.ntfnHandlers == nil {
return newNilFutureResult()
}
cmd := hdfjson.NewNotifyReceivedCmd(addresses)
return c.sendCmd(cmd)
}
// NotifyReceivedAsync returns an instance of a type that can be used to get the
// result of the RPC at some future time by invoking the Receive function on
// the returned instance.
//
// See NotifyReceived for the blocking version and more details.
//
// NOTE: This is a hdfd extension and requires a websocket connection.
//
// Deprecated: Use LoadTxFilterAsync instead.
func (c *Client) NotifyReceivedAsync(addresses []hdfutil.Address) FutureNotifyReceivedResult {
// Not supported in HTTP POST mode.
if c.config.HTTPPostMode {
return newFutureError(ErrWebsocketsRequired)
}
// Ignore the notification if the client is not interested in
// notifications.
if c.ntfnHandlers == nil {
return newNilFutureResult()
}
// Convert addresses to strings.
addrs := make([]string, 0, len(addresses))
for _, addr := range addresses {
addrs = append(addrs, addr.String())
}
cmd := hdfjson.NewNotifyReceivedCmd(addrs)
return c.sendCmd(cmd)
}
// NotifyReceived registers the client to receive notifications every time a
// new transaction which pays to one of the passed addresses is accepted to
// memory pool or in a block connected to the block chain. In addition, when
// one of these transactions is detected, the client is also automatically
// registered for notifications when the new transaction outpoints that the
// address now controls are spent (see NotifySpent). The notifications are
// delivered to the notification handlers associated with the client. Calling
// this function has no effect if there are no notification handlers and will
// result in an error if the client is configured to run in HTTP POST mode.
//
// The notifications delivered as a result of this call will be via one of
// OnRecvTx (for transactions that receive funds to one of the passed
// addresses) or OnRedeemingTx (for transactions which spend from one
// of the outpoints which are automatically registered upon receipt of funds to
// the address).
//
// NOTE: This is a hdfd extension and requires a websocket connection.
//
// Deprecated: Use LoadTxFilter instead.
func (c *Client) NotifyReceived(addresses []hdfutil.Address) error {
return c.NotifyReceivedAsync(addresses).Receive()
}
// FutureRescanResult is a future promise to deliver the result of a RescanAsync
// or RescanEndHeightAsync RPC invocation (or an applicable error).
//
// Deprecated: Use FutureRescanBlocksResult instead.
type FutureRescanResult chan *response
// Receive waits for the response promised by the future and returns an error
// if the rescan was not successful.
func (r FutureRescanResult) Receive() error {
_, err := receiveFuture(r)
return err
}
// RescanAsync returns an instance of a type that can be used to get the result
// of the RPC at some future time by invoking the Receive function on the
// returned instance.
//
// See Rescan for the blocking version and more details.
//
// NOTE: Rescan requests are not issued on client reconnect and must be
// performed manually (ideally with a new start height based on the last
// rescan progress notification). See the OnClientConnected notification
// callback for a good callsite to reissue rescan requests on connect and
// reconnect.
//
// NOTE: This is a hdfd extension and requires a websocket connection.
//
// Deprecated: Use RescanBlocksAsync instead.
func (c *Client) RescanAsync(startBlock *chainhash.Hash,
addresses []hdfutil.Address,
outpoints []*wire.OutPoint) FutureRescanResult {
// Not supported in HTTP POST mode.
if c.config.HTTPPostMode {
return newFutureError(ErrWebsocketsRequired)
}
// Ignore the notification if the client is not interested in
// notifications.
if c.ntfnHandlers == nil {
return newNilFutureResult()
}
// Convert the start block hash to a string.
var startBlockHashStr string
if startBlock != nil {
startBlockHashStr = startBlock.String()
}
// Convert addresses to strings.
addrs := make([]string, 0, len(addresses))
for _, addr := range addresses {
addrs = append(addrs, addr.String())
}
// Convert outpoints.
ops := make([]hdfjson.OutPoint, 0, len(outpoints))
for _, op := range outpoints {
ops = append(ops, newOutPointFromWire(op))
}
cmd := hdfjson.NewRescanCmd(startBlockHashStr, addrs, ops, nil)
return c.sendCmd(cmd)
}
// Rescan rescans the block chain starting from the provided starting block to
// the end of the longest chain for transactions that pay to the passed
// addresses and transactions which spend the passed outpoints.
//
// The notifications of found transactions are delivered to the notification
// handlers associated with client and this call will not return until the
// rescan has completed. Calling this function has no effect if there are no
// notification handlers and will result in an error if the client is configured
// to run in HTTP POST mode.
//
// The notifications delivered as a result of this call will be via one of
// OnRedeemingTx (for transactions which spend from one of the
// passed outpoints), OnRecvTx (for transactions that receive funds
// to one of the passed addresses), and OnRescanProgress (for rescan progress
// updates).
//
// See RescanEndHeight to also specify an ending block to finish the rescan
// without continuing through the best block on the main chain.
//
// NOTE: Rescan requests are not issued on client reconnect and must be
// performed manually (ideally with a new start height based on the last
// rescan progress notification). See the OnClientConnected notification
// callback for a good callsite to reissue rescan requests on connect and
// reconnect.
//
// NOTE: This is a hdfd extension and requires a websocket connection.
//
// Deprecated: Use RescanBlocks instead.
func (c *Client) Rescan(startBlock *chainhash.Hash,
addresses []hdfutil.Address,
outpoints []*wire.OutPoint) error {
return c.RescanAsync(startBlock, addresses, outpoints).Receive()
}
// RescanEndBlockAsync returns an instance of a type that can be used to get
// the result of the RPC at some future time by invoking the Receive function on
// the returned instance.
//
// See RescanEndBlock for the blocking version and more details.
//
// NOTE: This is a hdfd extension and requires a websocket connection.
//
// Deprecated: Use RescanBlocksAsync instead.
func (c *Client) RescanEndBlockAsync(startBlock *chainhash.Hash,
addresses []hdfutil.Address, outpoints []*wire.OutPoint,
endBlock *chainhash.Hash) FutureRescanResult {
// Not supported in HTTP POST mode.
if c.config.HTTPPostMode {
return newFutureError(ErrWebsocketsRequired)
}
// Ignore the notification if the client is not interested in
// notifications.
if c.ntfnHandlers == nil {
return newNilFutureResult()
}
// Convert block hashes to strings.
var startBlockHashStr, endBlockHashStr string
if startBlock != nil {
startBlockHashStr = startBlock.String()
}
if endBlock != nil {
endBlockHashStr = endBlock.String()
}
// Convert addresses to strings.
addrs := make([]string, 0, len(addresses))
for _, addr := range addresses {
addrs = append(addrs, addr.String())
}
// Convert outpoints.
ops := make([]hdfjson.OutPoint, 0, len(outpoints))
for _, op := range outpoints {
ops = append(ops, newOutPointFromWire(op))
}
cmd := hdfjson.NewRescanCmd(startBlockHashStr, addrs, ops,
&endBlockHashStr)
return c.sendCmd(cmd)
}
// RescanEndHeight rescans the block chain starting from the provided starting
// block up to the provided ending block for transactions that pay to the
// passed addresses and transactions which spend the passed outpoints.
//
// The notifications of found transactions are delivered to the notification
// handlers associated with client and this call will not return until the
// rescan has completed. Calling this function has no effect if there are no
// notification handlers and will result in an error if the client is configured
// to run in HTTP POST mode.
//
// The notifications delivered as a result of this call will be via one of
// OnRedeemingTx (for transactions which spend from one of the
// passed outpoints), OnRecvTx (for transactions that receive funds
// to one of the passed addresses), and OnRescanProgress (for rescan progress
// updates).
//
// See Rescan to also perform a rescan through current end of the longest chain.
//
// NOTE: This is a hdfd extension and requires a websocket connection.
//
// Deprecated: Use RescanBlocks instead.
func (c *Client) RescanEndHeight(startBlock *chainhash.Hash,
addresses []hdfutil.Address, outpoints []*wire.OutPoint,
endBlock *chainhash.Hash) error {
return c.RescanEndBlockAsync(startBlock, addresses, outpoints,
endBlock).Receive()
}
// FutureLoadTxFilterResult is a future promise to deliver the result
// of a LoadTxFilterAsync RPC invocation (or an applicable error).
//
// NOTE: This is a hdfd extension ported from github.com/decred/dcrrpcclient
// and requires a websocket connection.
type FutureLoadTxFilterResult chan *response
// Receive waits for the response promised by the future and returns an error
// if the registration was not successful.
//
// NOTE: This is a hdfd extension ported from github.com/decred/dcrrpcclient
// and requires a websocket connection.
func (r FutureLoadTxFilterResult) Receive() error {
_, err := receiveFuture(r)
return err
}
// LoadTxFilterAsync returns an instance of a type that can be used to
// get the result of the RPC at some future time by invoking the Receive
// function on the returned instance.
//
// See LoadTxFilter for the blocking version and more details.
//
// NOTE: This is a hdfd extension ported from github.com/decred/dcrrpcclient
// and requires a websocket connection.
func (c *Client) LoadTxFilterAsync(reload bool, addresses []hdfutil.Address,
outPoints []wire.OutPoint) FutureLoadTxFilterResult {
addrStrs := make([]string, len(addresses))
for i, a := range addresses {
addrStrs[i] = a.EncodeAddress()
}
outPointObjects := make([]hdfjson.OutPoint, len(outPoints))
for i := range outPoints {
outPointObjects[i] = hdfjson.OutPoint{
Hash: outPoints[i].Hash.String(),
Index: outPoints[i].Index,
}
}
cmd := hdfjson.NewLoadTxFilterCmd(reload, addrStrs, outPointObjects)
return c.sendCmd(cmd)
}
// LoadTxFilter loads, reloads, or adds data to a websocket client's transaction
// filter. The filter is consistently updated based on inspected transactions
// during mempool acceptance, block acceptance, and for all rescanned blocks.
//
// NOTE: This is a hdfd extension ported from github.com/decred/dcrrpcclient
// and requires a websocket connection.
func (c *Client) LoadTxFilter(reload bool, addresses []hdfutil.Address, outPoints []wire.OutPoint) error {
return c.LoadTxFilterAsync(reload, addresses, outPoints).Receive()
}
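// Example (illustrative; addr is an assumed hdfutil.Address value):
//
//	// Replace the current filter with a single watched address and no
//	// watched outpoints.
//	if err := client.LoadTxFilter(true, []hdfutil.Address{addr}, nil); err != nil {
//		log.Warnf("failed to load tx filter: %v", err)
//	}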
expr.rs

use std::sync::Arc;
use crate::runtime::*;
use ramp;
#[derive(Debug)]
pub enum Expr_ {
Let {
id: ID,
lhs: String,
rhs: Expr,
body: Expr,
},
FnCall {
id: ID,
name: FunctionDesc_,
args: im::Vector<Expr>,
},
Lambda {
id: ID,
params: im::Vector<String>,
body: Expr,
},
BinOp {
id: ID,
lhs: Expr,
op: FunctionDesc_,
rhs: Expr,
},
If {
id: ID,
cond: Expr,
then_body: Expr,
else_body: Expr,
},
Variable {
id: ID,
name: String,
},
IntLiteral {
id: ID,
val: ramp::Int,
},
StringLiteral {
id: ID,
val: String,
},
Blank {
id: ID,
},
}
pub type Expr = Arc<Expr_>;
unsafe impl Send for Expr_ {}
unsafe impl Sync for Expr_ {}
use Expr_::*;
pub fn elet(lhs: &str, rhs: Expr, body: Expr) -> Expr {
Arc::new(Let { id: gid(),
lhs: lhs.to_string(),
rhs,
body })
}
pub fn estr(val: &str) -> Expr {
Arc::new(StringLiteral { id: gid(),
val: val.to_string(), })
}
pub fn eint(val: i64) -> Expr {
Arc::new(IntLiteral { id: gid(),
val: ramp::Int::from(val), })
}
pub fn evar(name: &str) -> Expr {
Arc::new(Variable { id: gid(),
name: name.to_string(), })
}
pub fn elambda(names: im::Vector<&str>, body: Expr) -> Expr {
Arc::new(Lambda { id: gid(),
params: names.iter()
.map(|n| n.to_string())
.collect(),
body })
}
pub fn eif(cond: Expr, then_body: Expr, else_body: Expr) -> Expr {
Arc::new(If { id: gid(),
cond,
then_body,
else_body })
}
pub fn ebinop(lhs: Expr,
module: &str,
op: &str,
version: u32,
rhs: Expr)
-> Expr {
Arc::new(BinOp { id: gid(),
lhs,
op:
FunctionDesc_::FunctionDesc("dark".to_string(),
"stdlib".to_string(),
module.to_string(),
op.to_string(),
version),
rhs })
}
pub fn eblank() -> Expr {
Arc::new(Blank { id: gid() })
}
pub fn efn(owner: &str,
package: &str,
module: &str,
name: &str,
version: u32,
args: im::Vector<Expr>)
-> Expr {
Arc::new(FnCall { id: gid(),
name:
FunctionDesc_::FunctionDesc(owner.to_string(),
package.to_string(),
module.to_string(),
name.to_string(),
version),
args })
}
// Stdlib function
pub fn esfn(module: &str,
name: &str,
version: u32,
args: im::Vector<Expr>)
-> Expr {
efn("dark", "stdlib", module, name, version, args)
}
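// Illustrative construction sketch using the helpers above (the existence of
// an "Int::add" stdlib function is an assumption):
//
// let program = elet("x", eint(1),
//                    ebinop(evar("x"), "Int", "add", 0, eint(2)));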
twotone-system-security-update-warning.js

var data = {
"body": "<path opacity=\".3\" d=\"M11 7h2v6h-2V7zm0 8h2v2h-2v-2z\" fill=\"currentColor\"/><path d=\"M11 15h2v2h-2zm0-8h2v6h-2z\" fill=\"currentColor\"/><path d=\"M17 1.01L7 1c-1.1 0-2 .9-2 2v18c0 1.1.9 2 2 2h10c1.1 0 2-.9 2-2V3c0-1.1-.9-1.99-2-1.99zM17 21H7v-1h10v1zm0-3H7V6h10v12zm0-14H7V3h10v1z\" fill=\"currentColor\"/><path opacity=\".3\" d=\"M7 21h10v-1H7v1zM7 3v1h10V3H7z\" fill=\"currentColor\"/>",
"width": 24,
"height": 24
};
exports.__esModule = true;
exports.default = data;
win_servermanager_test.py

# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Rahul Handay <[email protected]>`
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
ensure_in_syspath('../../')
# Import Salt Libs
from salt.states import win_servermanager
# Globals
win_servermanager.__salt__ = {}
win_servermanager.__opts__ = {}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class WinServermanagerTestCase(TestCase):
'''
Validate the win_servermanager state
'''
def test_installed(self):
'''
Test to install the windows feature
'''
ret = {'name': 'salt',
'changes': {},
'result': True,
'comment': ''}
mock = MagicMock(side_effect=['salt', 'stack', 'stack'])
mock1 = MagicMock(return_value={'Success': 'True'})
with patch.dict(win_servermanager.__salt__,
{"win_servermanager.list_installed": mock,
"win_servermanager.install": mock1}):
ret.update({'comment': 'The feature salt is already installed'})
self.assertDictEqual(win_servermanager.installed('salt'), ret)
with patch.dict(win_servermanager.__opts__, {"test": True}):
ret.update({'changes': {'feature':
'salt will be installed'
' recurse=False'}, 'result': None,
'comment': ''})
self.assertDictEqual(win_servermanager.installed('salt'), ret)
with patch.dict(win_servermanager.__opts__, {"test": False}):
ret.update({'changes': {'feature': {'Success': 'True'}},
'result': True})
self.assertDictEqual(win_servermanager.installed('salt'),
ret)
def test_removed(self):
'''
Test to remove the windows feature
'''
ret = {'name': 'salt',
'changes': {},
'result': True,
'comment': ''}
mock = MagicMock(side_effect=['stack', 'salt', 'salt'])
mock1 = MagicMock(return_value={'Success': 'True'})
with patch.dict(win_servermanager.__salt__,
{"win_servermanager.list_installed": mock,
"win_servermanager.remove": mock1}):
ret.update({'comment': 'The feature salt is not installed'})
self.assertDictEqual(win_servermanager.removed('salt'), ret)
with patch.dict(win_servermanager.__opts__, {"test": True}):
ret.update({'changes': {'feature':
'salt will be removed'},
'result': None, 'comment': ''})
self.assertDictEqual(win_servermanager.removed('salt'), ret)
with patch.dict(win_servermanager.__opts__, {"test": False}):
ret.update({'changes': {'feature': {'Success': 'True'}},
'result': True})
self.assertDictEqual(win_servermanager.removed('salt'),
ret)

if __name__ == '__main__':
from integration import run_tests
run_tests(WinServermanagerTestCase, needs_daemon=False)
notifications.go

// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wallet
import (
"bytes"
"sync"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
"github.com/btcsuite/btcwallet/waddrmgr"
"github.com/btcsuite/btcwallet/walletdb"
"github.com/btcsuite/btcwallet/wtxmgr"
)
// TODO: It would be good to send errors during notification creation to the rpc
// server instead of just logging them here so the client is aware that wallet
// isn't working correctly and notifications are missing.
// TODO: Anything dealing with accounts here is expensive because the database
// is not organized correctly for true account support, but do the slow thing
// instead of the easy thing since the db can be fixed later, and we want the
// api correct now.
// NotificationServer is a server that interested clients may hook into to
// receive notifications of changes in a wallet. A client is created for each
// registered notification. Clients are guaranteed to receive messages in the
// order wallet created them, but there is no guaranteed synchronization between
// different clients.
type NotificationServer struct {
transactions []chan *TransactionNotifications
currentTxNtfn *TransactionNotifications // coalesce this since wallet does not add mined txs together
spentness map[uint32][]chan *SpentnessNotifications
accountClients []chan *AccountNotification
mu sync.Mutex // Only protects registered client channels
wallet *Wallet // smells like hacks
}
func newNotificationServer(wallet *Wallet) *NotificationServer {
return &NotificationServer{
spentness: make(map[uint32][]chan *SpentnessNotifications),
wallet: wallet,
}
}
func lookupInputAccount(dbtx walletdb.ReadTx, w *Wallet, details *wtxmgr.TxDetails, deb wtxmgr.DebitRecord) uint32 {
addrmgrNs := dbtx.ReadBucket(waddrmgrNamespaceKey)
txmgrNs := dbtx.ReadBucket(wtxmgrNamespaceKey)
// TODO: Debits should record which account(s?) they
// debit from so this doesn't need to be looked up.
prevOP := &details.MsgTx.TxIn[deb.Index].PreviousOutPoint
prev, err := w.TxStore.TxDetails(txmgrNs, &prevOP.Hash)
if err != nil {
log.Errorf("Cannot query previous transaction details for %v: %v", prevOP.Hash, err)
return 0
}
if prev == nil {
log.Errorf("Missing previous transaction %v", prevOP.Hash)
return 0
}
prevOut := prev.MsgTx.TxOut[prevOP.Index]
_, addrs, _, err := txscript.ExtractPkScriptAddrs(prevOut.PkScript, w.chainParams)
var inputAcct uint32
if err == nil && len(addrs) > 0 {
_, inputAcct, err = w.Manager.AddrAccount(addrmgrNs, addrs[0])
}
if err != nil {
log.Errorf("Cannot fetch account for previous output %v: %v", prevOP, err)
inputAcct = 0
}
return inputAcct
}
func lookupOutputChain(dbtx walletdb.ReadTx, w *Wallet, details *wtxmgr.TxDetails,
cred wtxmgr.CreditRecord) (account uint32, internal bool) {
addrmgrNs := dbtx.ReadBucket(waddrmgrNamespaceKey)
output := details.MsgTx.TxOut[cred.Index]
_, addrs, _, err := txscript.ExtractPkScriptAddrs(output.PkScript, w.chainParams)
var ma waddrmgr.ManagedAddress
if err == nil && len(addrs) > 0 {
ma, err = w.Manager.Address(addrmgrNs, addrs[0])
}
if err != nil {
log.Errorf("Cannot fetch account for wallet output: %v", err)
} else {
account = ma.Account()
internal = ma.Internal()
}
return
}
func makeTxSummary(dbtx walletdb.ReadTx, w *Wallet, details *wtxmgr.TxDetails) TransactionSummary {
serializedTx := details.SerializedTx
if serializedTx == nil {
var buf bytes.Buffer
err := details.MsgTx.Serialize(&buf)
if err != nil {
log.Errorf("Transaction serialization: %v", err)
}
serializedTx = buf.Bytes()
}
var fee btcutil.Amount
if len(details.Debits) == len(details.MsgTx.TxIn) {
for _, deb := range details.Debits {
fee += deb.Amount
}
for _, txOut := range details.MsgTx.TxOut {
fee -= btcutil.Amount(txOut.Value)
}
}
var inputs []TransactionSummaryInput
if len(details.Debits) != 0 {
inputs = make([]TransactionSummaryInput, len(details.Debits))
for i, d := range details.Debits {
inputs[i] = TransactionSummaryInput{
Index: d.Index,
PreviousAccount: lookupInputAccount(dbtx, w, details, d),
PreviousAmount: d.Amount,
}
}
}
outputs := make([]TransactionSummaryOutput, 0, len(details.MsgTx.TxOut))
for i := range details.MsgTx.TxOut {
credIndex := len(outputs)
mine := len(details.Credits) > credIndex && details.Credits[credIndex].Index == uint32(i)
if !mine {
continue
}
acct, internal := lookupOutputChain(dbtx, w, details, details.Credits[credIndex])
output := TransactionSummaryOutput{
Index: uint32(i),
Account: acct,
Internal: internal,
}
outputs = append(outputs, output)
}
return TransactionSummary{
Hash: &details.Hash,
Transaction: serializedTx,
MyInputs: inputs,
MyOutputs: outputs,
Fee: fee,
Timestamp: details.Received.Unix(),
}
}
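// Illustrative fee arithmetic for the summary above (values assumed): with
// debits of 5000 and 3000 satoshis and outputs totaling 7500, Fee is
// (5000 + 3000) - 7500 = 500. When not all inputs are wallet debits, the fee
// is left at zero because it cannot be computed from wallet data alone.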
func totalBalances(dbtx walletdb.ReadTx, w *Wallet, m map[uint32]btcutil.Amount) error {
addrmgrNs := dbtx.ReadBucket(waddrmgrNamespaceKey)
unspent, err := w.TxStore.UnspentOutputs(dbtx.ReadBucket(wtxmgrNamespaceKey))
if err != nil {
return err
}
for i := range unspent {
output := &unspent[i]
var outputAcct uint32
_, addrs, _, err := txscript.ExtractPkScriptAddrs(
output.PkScript, w.chainParams)
if err == nil && len(addrs) > 0 {
_, outputAcct, err = w.Manager.AddrAccount(addrmgrNs, addrs[0])
}
if err == nil {
_, ok := m[outputAcct]
if ok {
m[outputAcct] += output.Amount
}
}
}
return nil
}
func flattenBalanceMap(m map[uint32]btcutil.Amount) []AccountBalance {
s := make([]AccountBalance, 0, len(m))
for k, v := range m {
s = append(s, AccountBalance{Account: k, TotalBalance: v})
}
return s
}
func relevantAccounts(w *Wallet, m map[uint32]btcutil.Amount, txs []TransactionSummary) {
for _, tx := range txs {
for _, d := range tx.MyInputs {
m[d.PreviousAccount] = 0
}
for _, c := range tx.MyOutputs {
m[c.Account] = 0
}
}
}
func (s *NotificationServer) notifyUnminedTransaction(dbtx walletdb.ReadTx, details *wtxmgr.TxDetails) {
// Sanity check: should not be currently coalescing a notification for
// mined transactions at the same time that an unmined tx is notified.
if s.currentTxNtfn != nil {
log.Errorf("Notifying unmined tx notification (%s) while creating notification for blocks",
details.Hash)
}
defer s.mu.Unlock()
s.mu.Lock()
clients := s.transactions
if len(clients) == 0 {
return
}
unminedTxs := []TransactionSummary{makeTxSummary(dbtx, s.wallet, details)}
unminedHashes, err := s.wallet.TxStore.UnminedTxHashes(dbtx.ReadBucket(wtxmgrNamespaceKey))
if err != nil {
log.Errorf("Cannot fetch unmined transaction hashes: %v", err)
return
}
bals := make(map[uint32]btcutil.Amount)
relevantAccounts(s.wallet, bals, unminedTxs)
err = totalBalances(dbtx, s.wallet, bals)
if err != nil {
log.Errorf("Cannot determine balances for relevant accounts: %v", err)
return
}
n := &TransactionNotifications{
UnminedTransactions: unminedTxs,
UnminedTransactionHashes: unminedHashes,
NewBalances: flattenBalanceMap(bals),
}
for _, c := range clients {
c <- n
}
}
func (s *NotificationServer) notifyDetachedBlock(hash *chainhash.Hash) {
if s.currentTxNtfn == nil {
s.currentTxNtfn = &TransactionNotifications{}
}
s.currentTxNtfn.DetachedBlocks = append(s.currentTxNtfn.DetachedBlocks, hash)
}
func (s *NotificationServer) notifyMinedTransaction(dbtx walletdb.ReadTx, details *wtxmgr.TxDetails, block *wtxmgr.BlockMeta) {
if s.currentTxNtfn == nil {
s.currentTxNtfn = &TransactionNotifications{}
}
n := len(s.currentTxNtfn.AttachedBlocks)
if n == 0 || *s.currentTxNtfn.AttachedBlocks[n-1].Hash != block.Hash {
s.currentTxNtfn.AttachedBlocks = append(s.currentTxNtfn.AttachedBlocks, Block{
Hash: &block.Hash,
Height: block.Height,
Timestamp: block.Time.Unix(),
})
n++
}
txs := s.currentTxNtfn.AttachedBlocks[n-1].Transactions
s.currentTxNtfn.AttachedBlocks[n-1].Transactions =
append(txs, makeTxSummary(dbtx, s.wallet, details))
}
func (s *NotificationServer) notifyAttachedBlock(dbtx walletdb.ReadTx, block *wtxmgr.BlockMeta) {
if s.currentTxNtfn == nil {
s.currentTxNtfn = &TransactionNotifications{}
}
// Add block details if it wasn't already included for previously
// notified mined transactions.
n := len(s.currentTxNtfn.AttachedBlocks)
if n == 0 || *s.currentTxNtfn.AttachedBlocks[n-1].Hash != block.Hash {
s.currentTxNtfn.AttachedBlocks = append(s.currentTxNtfn.AttachedBlocks, Block{
Hash: &block.Hash,
Height: block.Height,
Timestamp: block.Time.Unix(),
})
}
// For now (until notification coalescing isn't necessary) just use
// chain length to determine if this is the new best block.
if s.wallet.ChainSynced() {
if len(s.currentTxNtfn.DetachedBlocks) >= len(s.currentTxNtfn.AttachedBlocks) {
return
}
}
	s.mu.Lock()
	defer s.mu.Unlock()
clients := s.transactions
if len(clients) == 0 {
s.currentTxNtfn = nil
return
}
// The UnminedTransactions field is intentionally not set. Since the
// hashes of all detached blocks are reported, and all transactions
// moved from a mined block back to unconfirmed are either in the
// UnminedTransactionHashes slice or don't exist due to conflicting with
	// a mined transaction in the new best chain, there is no possibility of
// a new, previously unseen transaction appearing in unconfirmed.
txmgrNs := dbtx.ReadBucket(wtxmgrNamespaceKey)
unminedHashes, err := s.wallet.TxStore.UnminedTxHashes(txmgrNs)
if err != nil {
log.Errorf("Cannot fetch unmined transaction hashes: %v", err)
return
}
s.currentTxNtfn.UnminedTransactionHashes = unminedHashes
bals := make(map[uint32]btcutil.Amount)
for _, b := range s.currentTxNtfn.AttachedBlocks {
relevantAccounts(s.wallet, bals, b.Transactions)
}
err = totalBalances(dbtx, s.wallet, bals)
if err != nil {
log.Errorf("Cannot determine balances for relevant accounts: %v", err)
return
}
s.currentTxNtfn.NewBalances = flattenBalanceMap(bals)
for _, c := range clients {
c <- s.currentTxNtfn
}
s.currentTxNtfn = nil
}
// TransactionNotifications is a notification of changes to the wallet's
// transaction set and the current chain tip that the wallet is considered to be
// synced with. All transactions added to the blockchain are organized by the
// block they were mined in.
//
// During a chain switch, all removed block hashes are included. Detached
// blocks are sorted in the reverse order they were mined. Attached blocks are
// sorted in the order mined.
//
// All newly added unmined transactions are included. Removed unmined
// transactions are not explicitly included. Instead, the hashes of all
// transactions still unmined are included.
//
// If any transactions were involved, each affected account's new total balance
// is included.
//
// TODO: Because this includes stuff about blocks and can be fired without any
// changes to transactions, it needs a better name.
type TransactionNotifications struct {
AttachedBlocks []Block
DetachedBlocks []*chainhash.Hash
UnminedTransactions []TransactionSummary
UnminedTransactionHashes []*chainhash.Hash
NewBalances []AccountBalance
}
// Block contains the properties and all relevant transactions of an attached
// block.
type Block struct {
Hash *chainhash.Hash
Height int32
Timestamp int64
Transactions []TransactionSummary
}
// TransactionSummary contains a transaction relevant to the wallet and marks
// which inputs and outputs were relevant.
type TransactionSummary struct {
Hash *chainhash.Hash
Transaction []byte
MyInputs []TransactionSummaryInput
MyOutputs []TransactionSummaryOutput
Fee btcutil.Amount
Timestamp int64
}
// TransactionSummaryInput describes a transaction input that is relevant to the
// wallet. The Index field marks the transaction input index of the transaction
// (not included here). The PreviousAccount and PreviousAmount fields describe
// how much this input debits from a wallet account.
type TransactionSummaryInput struct {
Index uint32
PreviousAccount uint32
PreviousAmount btcutil.Amount
}
// TransactionSummaryOutput describes wallet properties of a transaction output
// controlled by the wallet. The Index field marks the transaction output index
// of the transaction (not included here).
type TransactionSummaryOutput struct {
Index uint32
Account uint32
Internal bool
}
// AccountBalance associates a total (zero confirmation) balance with an
// account. Balances for other minimum confirmation counts require more
// expensive logic and it is not clear which minimums a client is interested in,
// so they are not included.
type AccountBalance struct {
Account uint32
TotalBalance btcutil.Amount
}
// TransactionNotificationsClient receives TransactionNotifications from the
// NotificationServer over the channel C.
type TransactionNotificationsClient struct {
C <-chan *TransactionNotifications
server *NotificationServer
}
// TransactionNotifications returns a client for receiving
// TransactionNotifications notifications over a channel. The channel is
// unbuffered.
//
// When finished, the Done method should be called on the client to disassociate
// it from the server.
func (s *NotificationServer) TransactionNotifications() TransactionNotificationsClient {
c := make(chan *TransactionNotifications)
s.mu.Lock()
s.transactions = append(s.transactions, c)
s.mu.Unlock()
return TransactionNotificationsClient{
C: c,
server: s,
}
}
// Done deregisters the client from the server and drains any remaining
// messages. It must be called exactly once when the client is finished
// receiving notifications.
func (c *TransactionNotificationsClient) Done() {
go func() {
// Drain notifications until the client channel is removed from
// the server and closed.
for range c.C {
}
}()
go func() {
s := c.server
s.mu.Lock()
clients := s.transactions
for i, ch := range clients {
if c.C == ch {
clients[i] = clients[len(clients)-1]
s.transactions = clients[:len(clients)-1]
close(ch)
break
}
}
s.mu.Unlock()
}()
}
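// The sketch below is illustrative and not part of the original source. It
// shows the client lifecycle documented above: receive from the unbuffered
// channel C, then call Done exactly once when finished. The quit channel is
// a hypothetical shutdown signal supplied by the caller.
func exampleConsumeTransactionNotifications(s *NotificationServer, quit <-chan struct{}) {
	c := s.TransactionNotifications()
	defer c.Done()
	for {
		select {
		case n := <-c.C:
			log.Infof("tx notification: %d attached block(s), %d detached "+
				"block(s), %d unmined tx(s), %d balance update(s)",
				len(n.AttachedBlocks), len(n.DetachedBlocks),
				len(n.UnminedTransactions), len(n.NewBalances))
		case <-quit:
			return
		}
	}
}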
// SpentnessNotifications is a notification that is fired for transaction
// outputs controlled by some account's keys. The notification may be about a
// newly added unspent transaction output or that a previously unspent output is
// now spent. When spent, the notification includes the spending transaction's
// hash and input index.
type SpentnessNotifications struct {
hash *chainhash.Hash
spenderHash *chainhash.Hash
index uint32
spenderIndex uint32
}
// Hash returns the transaction hash of the spent output.
func (n *SpentnessNotifications) Hash() *chainhash.Hash {
return n.hash
}
// Index returns the transaction output index of the spent output.
func (n *SpentnessNotifications) Index() uint32 {
return n.index
}
// Spender returns the spending transaction's hash and input index, if any. If
// the output is unspent, the final bool return is false.
func (n *SpentnessNotifications) Spender() (*chainhash.Hash, uint32, bool) {
return n.spenderHash, n.spenderIndex, n.spenderHash != nil
}
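// Illustrative sketch, not part of the original source: the comma-ok result
// from Spender distinguishes spent from unspent outputs, as documented above.
func exampleLogSpentness(n *SpentnessNotifications) {
	if spenderHash, spenderIndex, spent := n.Spender(); spent {
		log.Infof("output %v:%d spent by %v input %d",
			n.Hash(), n.Index(), spenderHash, spenderIndex)
	} else {
		log.Infof("output %v:%d is unspent", n.Hash(), n.Index())
	}
}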
// notifyUnspentOutput notifies registered clients of a new unspent output that
// is controlled by the wallet.
func (s *NotificationServer) notifyUnspentOutput(account uint32, hash *chainhash.Hash, index uint32) {
	s.mu.Lock()
	defer s.mu.Unlock()
clients := s.spentness[account]
if len(clients) == 0 {
return
}
n := &SpentnessNotifications{
hash: hash,
index: index,
}
for _, c := range clients {
c <- n
}
}
// notifySpentOutput notifies registered clients that a previously-unspent
// output is now spent, and includes the spender hash and input index in the
// notification.
func (s *NotificationServer) notifySpentOutput(account uint32, op *wire.OutPoint, spenderHash *chainhash.Hash, spenderIndex uint32) {
	s.mu.Lock()
	defer s.mu.Unlock()
clients := s.spentness[account]
if len(clients) == 0 {
return
}
n := &SpentnessNotifications{
hash: &op.Hash,
index: op.Index,
spenderHash: spenderHash,
spenderIndex: spenderIndex,
}
for _, c := range clients {
c <- n
}
}
// SpentnessNotificationsClient receives SpentnessNotifications from the
// NotificationServer over the channel C.
type SpentnessNotificationsClient struct {
C <-chan *SpentnessNotifications
account uint32
server *NotificationServer
}
// AccountSpentnessNotifications registers a client for spentness changes of
// outputs controlled by the account.
func (s *NotificationServer) AccountSpentnessNotifications(account uint32) SpentnessNotificationsClient {
c := make(chan *SpentnessNotifications)
s.mu.Lock()
s.spentness[account] = append(s.spentness[account], c)
s.mu.Unlock()
return SpentnessNotificationsClient{
C: c,
account: account,
server: s,
}
}
// Done deregisters the client from the server and drains any remaining
// messages. It must be called exactly once when the client is finished
// receiving notifications.
func (c *SpentnessNotificationsClient) Done() {
go func() {
// Drain notifications until the client channel is removed from
// the server and closed.
for range c.C {
}
}()
go func() {
s := c.server
s.mu.Lock()
clients := s.spentness[c.account]
for i, ch := range clients {
if c.C == ch {
clients[i] = clients[len(clients)-1]
s.spentness[c.account] = clients[:len(clients)-1]
close(ch)
break
}
}
s.mu.Unlock()
}()
}
// AccountNotification contains properties regarding an account, such as its
// name and the number of derived and imported keys. When any of these
// properties change, the notification is fired.
type AccountNotification struct {
AccountNumber uint32
AccountName string
ExternalKeyCount uint32
InternalKeyCount uint32
ImportedKeyCount uint32
}
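// notifyAccountProperties notifies all registered account clients of an
// account's current properties.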
func (s *NotificationServer) notifyAccountProperties(props *waddrmgr.AccountProperties) {
	s.mu.Lock()
	defer s.mu.Unlock()
clients := s.accountClients
if len(clients) == 0 {
return
}
n := &AccountNotification{
AccountNumber: props.AccountNumber,
AccountName: props.AccountName,
ExternalKeyCount: props.ExternalKeyCount,
InternalKeyCount: props.InternalKeyCount,
ImportedKeyCount: props.ImportedKeyCount,
}
for _, c := range clients {
c <- n
}
}
// AccountNotificationsClient receives AccountNotifications over the channel C.
type AccountNotificationsClient struct {
C chan *AccountNotification
server *NotificationServer
}
// AccountNotifications returns a client for receiving AccountNotifications over
// a channel. The channel is unbuffered. When finished, the client's Done
// method should be called to disassociate the client from the server.
func (s *NotificationServer) AccountNotifications() AccountNotificationsClient {
c := make(chan *AccountNotification)
s.mu.Lock()
s.accountClients = append(s.accountClients, c)
s.mu.Unlock()
return AccountNotificationsClient{
C: c,
server: s,
}
}
// Done deregisters the client from the server and drains any remaining
// messages. It must be called exactly once when the client is finished
// receiving notifications.
func (c *AccountNotificationsClient) Done() {
go func() {
for range c.C {
}
}()
go func() {
s := c.server
s.mu.Lock()
clients := s.accountClients
for i, ch := range clients {
if c.C == ch {
clients[i] = clients[len(clients)-1]
s.accountClients = clients[:len(clients)-1]
close(ch)
break
}
}
s.mu.Unlock()
}()
}
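// Illustrative sketch, not part of the original source: account notification
// clients follow the same contract as the other clients in this file, an
// unbuffered channel plus a single Done call. The quit channel is a
// hypothetical shutdown signal.
func exampleWatchAccountProperties(s *NotificationServer, quit <-chan struct{}) {
	c := s.AccountNotifications()
	defer c.Done()
	for {
		select {
		case n := <-c.C:
			log.Infof("account %d (%q): %d external, %d internal, %d imported key(s)",
				n.AccountNumber, n.AccountName, n.ExternalKeyCount,
				n.InternalKeyCount, n.ImportedKeyCount)
		case <-quit:
			return
		}
	}
}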
validators.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package securityhub
import (
"context"
"fmt"
"github.com/aws/aws-sdk-go-v2/service/securityhub/types"
smithy "github.com/aws/smithy-go"
"github.com/aws/smithy-go/middleware"
)
type validateOpAcceptAdministratorInvitation struct {
}
func (*validateOpAcceptAdministratorInvitation) ID() string {
return "OperationInputValidation"
}
func (m *validateOpAcceptAdministratorInvitation) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*AcceptAdministratorInvitationInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpAcceptAdministratorInvitationInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpAcceptInvitation struct {
}
func (*validateOpAcceptInvitation) ID() string {
return "OperationInputValidation"
}
func (m *validateOpAcceptInvitation) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*AcceptInvitationInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpAcceptInvitationInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpBatchDisableStandards struct {
}
func (*validateOpBatchDisableStandards) ID() string {
return "OperationInputValidation"
}
func (m *validateOpBatchDisableStandards) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*BatchDisableStandardsInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpBatchDisableStandardsInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpBatchEnableStandards struct {
}
func (*validateOpBatchEnableStandards) ID() string {
return "OperationInputValidation"
}
func (m *validateOpBatchEnableStandards) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*BatchEnableStandardsInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpBatchEnableStandardsInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpBatchImportFindings struct {
}
func (*validateOpBatchImportFindings) ID() string {
return "OperationInputValidation"
}
func (m *validateOpBatchImportFindings) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*BatchImportFindingsInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpBatchImportFindingsInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpBatchUpdateFindings struct {
}
func (*validateOpBatchUpdateFindings) ID() string {
return "OperationInputValidation"
}
func (m *validateOpBatchUpdateFindings) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*BatchUpdateFindingsInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpBatchUpdateFindingsInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpCreateActionTarget struct {
}
func (*validateOpCreateActionTarget) ID() string {
return "OperationInputValidation"
}
func (m *validateOpCreateActionTarget) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*CreateActionTargetInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpCreateActionTargetInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpCreateInsight struct {
}
func (*validateOpCreateInsight) ID() string {
return "OperationInputValidation"
}
func (m *validateOpCreateInsight) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*CreateInsightInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpCreateInsightInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpCreateMembers struct {
}
func (*validateOpCreateMembers) ID() string {
return "OperationInputValidation"
}
func (m *validateOpCreateMembers) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*CreateMembersInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpCreateMembersInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpDeclineInvitations struct {
}
func (*validateOpDeclineInvitations) ID() string {
return "OperationInputValidation"
}
func (m *validateOpDeclineInvitations) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*DeclineInvitationsInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpDeclineInvitationsInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpDeleteActionTarget struct {
}
func (*validateOpDeleteActionTarget) ID() string {
return "OperationInputValidation"
}
func (m *validateOpDeleteActionTarget) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*DeleteActionTargetInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpDeleteActionTargetInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpDeleteInsight struct {
}
func (*validateOpDeleteInsight) ID() string {
return "OperationInputValidation"
}
func (m *validateOpDeleteInsight) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*DeleteInsightInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpDeleteInsightInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpDeleteInvitations struct {
}
func (*validateOpDeleteInvitations) ID() string {
return "OperationInputValidation"
}
func (m *validateOpDeleteInvitations) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*DeleteInvitationsInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpDeleteInvitationsInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpDeleteMembers struct {
}
func (*validateOpDeleteMembers) ID() string {
return "OperationInputValidation"
}
func (m *validateOpDeleteMembers) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*DeleteMembersInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpDeleteMembersInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpDescribeStandardsControls struct {
}
func (*validateOpDescribeStandardsControls) ID() string {
return "OperationInputValidation"
}
func (m *validateOpDescribeStandardsControls) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*DescribeStandardsControlsInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpDescribeStandardsControlsInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpDisableImportFindingsForProduct struct {
}
func (*validateOpDisableImportFindingsForProduct) ID() string {
return "OperationInputValidation"
}
func (m *validateOpDisableImportFindingsForProduct) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*DisableImportFindingsForProductInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpDisableImportFindingsForProductInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpDisableOrganizationAdminAccount struct {
}
func (*validateOpDisableOrganizationAdminAccount) ID() string {
return "OperationInputValidation"
}
func (m *validateOpDisableOrganizationAdminAccount) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*DisableOrganizationAdminAccountInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpDisableOrganizationAdminAccountInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpDisassociateMembers struct {
}
func (*validateOpDisassociateMembers) ID() string {
return "OperationInputValidation"
}
func (m *validateOpDisassociateMembers) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*DisassociateMembersInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpDisassociateMembersInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpEnableImportFindingsForProduct struct {
}
func (*validateOpEnableImportFindingsForProduct) ID() string {
return "OperationInputValidation"
}
func (m *validateOpEnableImportFindingsForProduct) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*EnableImportFindingsForProductInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpEnableImportFindingsForProductInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpEnableOrganizationAdminAccount struct {
}
func (*validateOpEnableOrganizationAdminAccount) ID() string {
return "OperationInputValidation"
}
func (m *validateOpEnableOrganizationAdminAccount) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*EnableOrganizationAdminAccountInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpEnableOrganizationAdminAccountInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpGetInsightResults struct {
}
func (*validateOpGetInsightResults) ID() string {
return "OperationInputValidation"
}
func (m *validateOpGetInsightResults) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*GetInsightResultsInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpGetInsightResultsInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpGetMembers struct {
}
func (*validateOpGetMembers) ID() string {
return "OperationInputValidation"
}
func (m *validateOpGetMembers) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*GetMembersInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpGetMembersInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpInviteMembers struct {
}
func (*validateOpInviteMembers) ID() string {
return "OperationInputValidation"
}
func (m *validateOpInviteMembers) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*InviteMembersInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpInviteMembersInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpListTagsForResource struct {
}
func (*validateOpListTagsForResource) ID() string {
return "OperationInputValidation"
}
func (m *validateOpListTagsForResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*ListTagsForResourceInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpListTagsForResourceInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpTagResource struct {
}
func (*validateOpTagResource) ID() string {
return "OperationInputValidation"
}
func (m *validateOpTagResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*TagResourceInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpTagResourceInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpUntagResource struct {
}
func (*validateOpUntagResource) ID() string {
return "OperationInputValidation"
}
func (m *validateOpUntagResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*UntagResourceInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpUntagResourceInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpUpdateActionTarget struct {
}
func (*validateOpUpdateActionTarget) ID() string {
return "OperationInputValidation"
}
func (m *validateOpUpdateActionTarget) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*UpdateActionTargetInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpUpdateActionTargetInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpUpdateFindings struct {
}
func (*validateOpUpdateFindings) ID() string {
return "OperationInputValidation"
}
func (m *validateOpUpdateFindings) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*UpdateFindingsInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpUpdateFindingsInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpUpdateInsight struct {
}
func (*validateOpUpdateInsight) ID() string {
return "OperationInputValidation"
}
func (m *validateOpUpdateInsight) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*UpdateInsightInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpUpdateInsightInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpUpdateOrganizationConfiguration struct {
}
func (*validateOpUpdateOrganizationConfiguration) ID() string {
return "OperationInputValidation"
}
func (m *validateOpUpdateOrganizationConfiguration) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*UpdateOrganizationConfigurationInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpUpdateOrganizationConfigurationInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
type validateOpUpdateStandardsControl struct {
}
func (*validateOpUpdateStandardsControl) ID() string {
return "OperationInputValidation"
}
func (m *validateOpUpdateStandardsControl) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
input, ok := in.Parameters.(*UpdateStandardsControlInput)
if !ok {
return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
}
if err := validateOpUpdateStandardsControlInput(input); err != nil {
return out, metadata, err
}
return next.HandleInitialize(ctx, in)
}
func addOpAcceptAdministratorInvitationValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpAcceptAdministratorInvitation{}, middleware.After)
}
func addOpAcceptInvitationValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpAcceptInvitation{}, middleware.After)
}
func addOpBatchDisableStandardsValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpBatchDisableStandards{}, middleware.After)
}
func addOpBatchEnableStandardsValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpBatchEnableStandards{}, middleware.After)
}
func addOpBatchImportFindingsValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpBatchImportFindings{}, middleware.After)
}
func addOpBatchUpdateFindingsValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpBatchUpdateFindings{}, middleware.After)
}
func addOpCreateActionTargetValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpCreateActionTarget{}, middleware.After)
}
func addOpCreateInsightValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpCreateInsight{}, middleware.After)
}
func addOpCreateMembersValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpCreateMembers{}, middleware.After)
}
func addOpDeclineInvitationsValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpDeclineInvitations{}, middleware.After)
}
func addOpDeleteActionTargetValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpDeleteActionTarget{}, middleware.After)
}
func addOpDeleteInsightValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpDeleteInsight{}, middleware.After)
}
func addOpDeleteInvitationsValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpDeleteInvitations{}, middleware.After)
}
func addOpDeleteMembersValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpDeleteMembers{}, middleware.After)
}
func addOpDescribeStandardsControlsValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpDescribeStandardsControls{}, middleware.After)
}
func addOpDisableImportFindingsForProductValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpDisableImportFindingsForProduct{}, middleware.After)
}
func addOpDisableOrganizationAdminAccountValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpDisableOrganizationAdminAccount{}, middleware.After)
}
func addOpDisassociateMembersValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpDisassociateMembers{}, middleware.After)
}
func addOpEnableImportFindingsForProductValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpEnableImportFindingsForProduct{}, middleware.After)
}
func addOpEnableOrganizationAdminAccountValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpEnableOrganizationAdminAccount{}, middleware.After)
}
func addOpGetInsightResultsValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpGetInsightResults{}, middleware.After)
}
func addOpGetMembersValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpGetMembers{}, middleware.After)
}
func addOpInviteMembersValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpInviteMembers{}, middleware.After)
}
func addOpListTagsForResourceValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpListTagsForResource{}, middleware.After)
}
func addOpTagResourceValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpTagResource{}, middleware.After)
}
func addOpUntagResourceValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpUntagResource{}, middleware.After)
}
func addOpUpdateActionTargetValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpUpdateActionTarget{}, middleware.After)
}
func addOpUpdateFindingsValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpUpdateFindings{}, middleware.After)
}
func addOpUpdateInsightValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpUpdateInsight{}, middleware.After)
}
func addOpUpdateOrganizationConfigurationValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpUpdateOrganizationConfiguration{}, middleware.After)
}
func addOpUpdateStandardsControlValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpUpdateStandardsControl{}, middleware.After)
}
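// The function below is an illustrative sketch, not generated code. It shows
// how the Initialize-step validators above short-circuit an operation before
// any request is built; the account ID literal is hypothetical.
func exampleValidationFailure() error {
	masterID := "111122223333" // hypothetical 12-digit AWS account ID
	// InvitationId is required but unset, so this returns a
	// smithy.InvalidParamsError naming the missing parameter.
	return validateOpAcceptInvitationInput(&AcceptInvitationInput{
		MasterId: &masterID,
	})
}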
func validateAccountDetails(v *types.AccountDetails) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "AccountDetails"}
if v.AccountId == nil {
invalidParams.Add(smithy.NewErrParamRequired("AccountId"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateAccountDetailsList(v []types.AccountDetails) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "AccountDetailsList"}
for i := range v {
if err := validateAccountDetails(&v[i]); err != nil {
invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
}
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateAwsSecurityFinding(v *types.AwsSecurityFinding) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "AwsSecurityFinding"}
if v.SchemaVersion == nil {
invalidParams.Add(smithy.NewErrParamRequired("SchemaVersion"))
}
if v.Id == nil {
invalidParams.Add(smithy.NewErrParamRequired("Id"))
}
if v.ProductArn == nil {
invalidParams.Add(smithy.NewErrParamRequired("ProductArn"))
}
if v.GeneratorId == nil {
invalidParams.Add(smithy.NewErrParamRequired("GeneratorId"))
}
if v.AwsAccountId == nil {
invalidParams.Add(smithy.NewErrParamRequired("AwsAccountId"))
}
if v.CreatedAt == nil {
invalidParams.Add(smithy.NewErrParamRequired("CreatedAt"))
}
if v.UpdatedAt == nil {
invalidParams.Add(smithy.NewErrParamRequired("UpdatedAt"))
}
if v.Title == nil {
invalidParams.Add(smithy.NewErrParamRequired("Title"))
}
if v.Description == nil {
invalidParams.Add(smithy.NewErrParamRequired("Description"))
}
if v.Malware != nil {
if err := validateMalwareList(v.Malware); err != nil {
invalidParams.AddNested("Malware", err.(smithy.InvalidParamsError))
}
}
if v.Resources == nil {
invalidParams.Add(smithy.NewErrParamRequired("Resources"))
} else if v.Resources != nil {
if err := validateResourceList(v.Resources); err != nil {
invalidParams.AddNested("Resources", err.(smithy.InvalidParamsError))
}
}
if v.Compliance != nil {
if err := validateCompliance(v.Compliance); err != nil {
invalidParams.AddNested("Compliance", err.(smithy.InvalidParamsError))
}
}
if v.RelatedFindings != nil {
if err := validateRelatedFindingList(v.RelatedFindings); err != nil {
invalidParams.AddNested("RelatedFindings", err.(smithy.InvalidParamsError))
}
}
if v.Note != nil {
if err := validateNote(v.Note); err != nil {
invalidParams.AddNested("Note", err.(smithy.InvalidParamsError))
}
}
if v.Vulnerabilities != nil {
if err := validateVulnerabilityList(v.Vulnerabilities); err != nil {
invalidParams.AddNested("Vulnerabilities", err.(smithy.InvalidParamsError))
}
}
if v.PatchSummary != nil {
if err := validatePatchSummary(v.PatchSummary); err != nil {
invalidParams.AddNested("PatchSummary", err.(smithy.InvalidParamsError))
}
}
if v.FindingProviderFields != nil {
if err := validateFindingProviderFields(v.FindingProviderFields); err != nil {
invalidParams.AddNested("FindingProviderFields", err.(smithy.InvalidParamsError))
}
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateAwsSecurityFindingIdentifier(v *types.AwsSecurityFindingIdentifier) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "AwsSecurityFindingIdentifier"}
if v.Id == nil {
invalidParams.Add(smithy.NewErrParamRequired("Id"))
}
if v.ProductArn == nil {
invalidParams.Add(smithy.NewErrParamRequired("ProductArn"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateAwsSecurityFindingIdentifierList(v []types.AwsSecurityFindingIdentifier) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "AwsSecurityFindingIdentifierList"}
for i := range v {
if err := validateAwsSecurityFindingIdentifier(&v[i]); err != nil {
invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
}
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateBatchImportFindingsRequestFindingList(v []types.AwsSecurityFinding) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "BatchImportFindingsRequestFindingList"}
for i := range v {
if err := validateAwsSecurityFinding(&v[i]); err != nil {
invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
}
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateCompliance(v *types.Compliance) error {
	if v == nil {
		return nil
	}
	invalidParams := smithy.InvalidParamsError{Context: "Compliance"}
	if v.StatusReasons != nil {
		if err := validateStatusReasonsList(v.StatusReasons); err != nil {
			invalidParams.AddNested("StatusReasons", err.(smithy.InvalidParamsError))
		}
	}
	if invalidParams.Len() > 0 {
		return invalidParams
	} else {
		return nil
	}
}
func validateFindingProviderFields(v *types.FindingProviderFields) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "FindingProviderFields"}
if v.RelatedFindings != nil {
if err := validateRelatedFindingList(v.RelatedFindings); err != nil {
invalidParams.AddNested("RelatedFindings", err.(smithy.InvalidParamsError))
}
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateMalware(v *types.Malware) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "Malware"}
if v.Name == nil {
invalidParams.Add(smithy.NewErrParamRequired("Name"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateMalwareList(v []types.Malware) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "MalwareList"}
for i := range v {
if err := validateMalware(&v[i]); err != nil {
invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
}
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateNote(v *types.Note) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "Note"}
if v.Text == nil {
invalidParams.Add(smithy.NewErrParamRequired("Text"))
}
if v.UpdatedBy == nil {
invalidParams.Add(smithy.NewErrParamRequired("UpdatedBy"))
}
if v.UpdatedAt == nil {
invalidParams.Add(smithy.NewErrParamRequired("UpdatedAt"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateNoteUpdate(v *types.NoteUpdate) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "NoteUpdate"}
if v.Text == nil {
invalidParams.Add(smithy.NewErrParamRequired("Text"))
}
if v.UpdatedBy == nil {
invalidParams.Add(smithy.NewErrParamRequired("UpdatedBy"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validatePatchSummary(v *types.PatchSummary) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "PatchSummary"}
if v.Id == nil {
invalidParams.Add(smithy.NewErrParamRequired("Id"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateRelatedFinding(v *types.RelatedFinding) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "RelatedFinding"}
if v.ProductArn == nil {
invalidParams.Add(smithy.NewErrParamRequired("ProductArn"))
}
if v.Id == nil {
invalidParams.Add(smithy.NewErrParamRequired("Id"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateRelatedFindingList(v []types.RelatedFinding) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "RelatedFindingList"}
for i := range v {
if err := validateRelatedFinding(&v[i]); err != nil {
invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
}
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateResource(v *types.Resource) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "Resource"}
if v.Type == nil {
invalidParams.Add(smithy.NewErrParamRequired("Type"))
}
if v.Id == nil {
invalidParams.Add(smithy.NewErrParamRequired("Id"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateResourceList(v []types.Resource) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "ResourceList"}
for i := range v {
if err := validateResource(&v[i]); err != nil {
invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
}
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateStandardsSubscriptionRequest(v *types.StandardsSubscriptionRequest) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "StandardsSubscriptionRequest"}
if v.StandardsArn == nil {
invalidParams.Add(smithy.NewErrParamRequired("StandardsArn"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateStandardsSubscriptionRequests(v []types.StandardsSubscriptionRequest) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "StandardsSubscriptionRequests"}
for i := range v {
if err := validateStandardsSubscriptionRequest(&v[i]); err != nil {
invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
}
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateStatusReason(v *types.StatusReason) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "StatusReason"}
if v.ReasonCode == nil {
invalidParams.Add(smithy.NewErrParamRequired("ReasonCode"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateStatusReasonsList(v []types.StatusReason) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "StatusReasonsList"}
for i := range v {
if err := validateStatusReason(&v[i]); err != nil {
invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
}
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateVulnerability(v *types.Vulnerability) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "Vulnerability"}
if v.Id == nil {
invalidParams.Add(smithy.NewErrParamRequired("Id"))
}
if v.Vendor != nil {
if err := validateVulnerabilityVendor(v.Vendor); err != nil {
invalidParams.AddNested("Vendor", err.(smithy.InvalidParamsError))
}
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateVulnerabilityList(v []types.Vulnerability) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "VulnerabilityList"}
for i := range v {
if err := validateVulnerability(&v[i]); err != nil {
invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
}
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateVulnerabilityVendor(v *types.VulnerabilityVendor) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "VulnerabilityVendor"}
if v.Name == nil {
invalidParams.Add(smithy.NewErrParamRequired("Name"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpAcceptAdministratorInvitationInput(v *AcceptAdministratorInvitationInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "AcceptAdministratorInvitationInput"}
if v.AdministratorId == nil {
invalidParams.Add(smithy.NewErrParamRequired("AdministratorId"))
}
if v.InvitationId == nil {
invalidParams.Add(smithy.NewErrParamRequired("InvitationId"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpAcceptInvitationInput(v *AcceptInvitationInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "AcceptInvitationInput"}
if v.MasterId == nil {
invalidParams.Add(smithy.NewErrParamRequired("MasterId"))
}
if v.InvitationId == nil {
invalidParams.Add(smithy.NewErrParamRequired("InvitationId"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpBatchDisableStandardsInput(v *BatchDisableStandardsInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "BatchDisableStandardsInput"}
if v.StandardsSubscriptionArns == nil {
invalidParams.Add(smithy.NewErrParamRequired("StandardsSubscriptionArns"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpBatchEnableStandardsInput(v *BatchEnableStandardsInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "BatchEnableStandardsInput"}
if v.StandardsSubscriptionRequests == nil {
invalidParams.Add(smithy.NewErrParamRequired("StandardsSubscriptionRequests"))
} else if v.StandardsSubscriptionRequests != nil {
if err := validateStandardsSubscriptionRequests(v.StandardsSubscriptionRequests); err != nil {
invalidParams.AddNested("StandardsSubscriptionRequests", err.(smithy.InvalidParamsError))
}
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpBatchImportFindingsInput(v *BatchImportFindingsInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "BatchImportFindingsInput"}
if v.Findings == nil {
invalidParams.Add(smithy.NewErrParamRequired("Findings"))
} else if v.Findings != nil {
if err := validateBatchImportFindingsRequestFindingList(v.Findings); err != nil {
invalidParams.AddNested("Findings", err.(smithy.InvalidParamsError))
}
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpBatchUpdateFindingsInput(v *BatchUpdateFindingsInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "BatchUpdateFindingsInput"}
if v.FindingIdentifiers == nil {
invalidParams.Add(smithy.NewErrParamRequired("FindingIdentifiers"))
} else if v.FindingIdentifiers != nil {
if err := validateAwsSecurityFindingIdentifierList(v.FindingIdentifiers); err != nil {
invalidParams.AddNested("FindingIdentifiers", err.(smithy.InvalidParamsError))
}
}
if v.Note != nil {
if err := validateNoteUpdate(v.Note); err != nil {
invalidParams.AddNested("Note", err.(smithy.InvalidParamsError))
}
}
if v.RelatedFindings != nil {
if err := validateRelatedFindingList(v.RelatedFindings); err != nil {
invalidParams.AddNested("RelatedFindings", err.(smithy.InvalidParamsError))
}
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpCreateActionTargetInput(v *CreateActionTargetInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "CreateActionTargetInput"}
if v.Name == nil {
invalidParams.Add(smithy.NewErrParamRequired("Name"))
}
if v.Description == nil {
invalidParams.Add(smithy.NewErrParamRequired("Description"))
}
if v.Id == nil {
invalidParams.Add(smithy.NewErrParamRequired("Id"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpCreateInsightInput(v *CreateInsightInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "CreateInsightInput"}
if v.Name == nil {
invalidParams.Add(smithy.NewErrParamRequired("Name"))
}
if v.Filters == nil {
invalidParams.Add(smithy.NewErrParamRequired("Filters"))
}
if v.GroupByAttribute == nil {
invalidParams.Add(smithy.NewErrParamRequired("GroupByAttribute"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpCreateMembersInput(v *CreateMembersInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "CreateMembersInput"}
if v.AccountDetails == nil {
invalidParams.Add(smithy.NewErrParamRequired("AccountDetails"))
} else if v.AccountDetails != nil {
if err := validateAccountDetailsList(v.AccountDetails); err != nil {
invalidParams.AddNested("AccountDetails", err.(smithy.InvalidParamsError))
}
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpDeclineInvitationsInput(v *DeclineInvitationsInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "DeclineInvitationsInput"}
if v.AccountIds == nil {
invalidParams.Add(smithy.NewErrParamRequired("AccountIds"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpDeleteActionTargetInput(v *DeleteActionTargetInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "DeleteActionTargetInput"}
if v.ActionTargetArn == nil {
invalidParams.Add(smithy.NewErrParamRequired("ActionTargetArn"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpDeleteInsightInput(v *DeleteInsightInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "DeleteInsightInput"}
if v.InsightArn == nil {
invalidParams.Add(smithy.NewErrParamRequired("InsightArn"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpDeleteInvitationsInput(v *DeleteInvitationsInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "DeleteInvitationsInput"}
if v.AccountIds == nil {
invalidParams.Add(smithy.NewErrParamRequired("AccountIds"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpDeleteMembersInput(v *DeleteMembersInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "DeleteMembersInput"}
if v.AccountIds == nil {
invalidParams.Add(smithy.NewErrParamRequired("AccountIds"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpDescribeStandardsControlsInput(v *DescribeStandardsControlsInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "DescribeStandardsControlsInput"}
if v.StandardsSubscriptionArn == nil {
invalidParams.Add(smithy.NewErrParamRequired("StandardsSubscriptionArn"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpDisableImportFindingsForProductInput(v *DisableImportFindingsForProductInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "DisableImportFindingsForProductInput"}
if v.ProductSubscriptionArn == nil {
invalidParams.Add(smithy.NewErrParamRequired("ProductSubscriptionArn"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpDisableOrganizationAdminAccountInput(v *DisableOrganizationAdminAccountInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "DisableOrganizationAdminAccountInput"}
if v.AdminAccountId == nil {
invalidParams.Add(smithy.NewErrParamRequired("AdminAccountId"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpDisassociateMembersInput(v *DisassociateMembersInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "DisassociateMembersInput"}
if v.AccountIds == nil {
invalidParams.Add(smithy.NewErrParamRequired("AccountIds"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpEnableImportFindingsForProductInput(v *EnableImportFindingsForProductInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "EnableImportFindingsForProductInput"}
if v.ProductArn == nil {
invalidParams.Add(smithy.NewErrParamRequired("ProductArn"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpEnableOrganizationAdminAccountInput(v *EnableOrganizationAdminAccountInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "EnableOrganizationAdminAccountInput"}
if v.AdminAccountId == nil {
invalidParams.Add(smithy.NewErrParamRequired("AdminAccountId"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpGetInsightResultsInput(v *GetInsightResultsInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "GetInsightResultsInput"}
if v.InsightArn == nil {
invalidParams.Add(smithy.NewErrParamRequired("InsightArn"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpGetMembersInput(v *GetMembersInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "GetMembersInput"}
if v.AccountIds == nil {
invalidParams.Add(smithy.NewErrParamRequired("AccountIds"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpInviteMembersInput(v *InviteMembersInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "InviteMembersInput"}
if v.AccountIds == nil {
invalidParams.Add(smithy.NewErrParamRequired("AccountIds"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpListTagsForResourceInput(v *ListTagsForResourceInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "ListTagsForResourceInput"}
if v.ResourceArn == nil {
invalidParams.Add(smithy.NewErrParamRequired("ResourceArn"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpTagResourceInput(v *TagResourceInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "TagResourceInput"}
if v.ResourceArn == nil {
invalidParams.Add(smithy.NewErrParamRequired("ResourceArn"))
}
if v.Tags == nil {
invalidParams.Add(smithy.NewErrParamRequired("Tags"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpUntagResourceInput(v *UntagResourceInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "UntagResourceInput"}
if v.ResourceArn == nil {
invalidParams.Add(smithy.NewErrParamRequired("ResourceArn"))
}
if v.TagKeys == nil {
invalidParams.Add(smithy.NewErrParamRequired("TagKeys"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpUpdateActionTargetInput(v *UpdateActionTargetInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "UpdateActionTargetInput"}
if v.ActionTargetArn == nil {
invalidParams.Add(smithy.NewErrParamRequired("ActionTargetArn"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpUpdateFindingsInput(v *UpdateFindingsInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "UpdateFindingsInput"}
if v.Filters == nil {
invalidParams.Add(smithy.NewErrParamRequired("Filters"))
}
if v.Note != nil {
if err := validateNoteUpdate(v.Note); err != nil {
invalidParams.AddNested("Note", err.(smithy.InvalidParamsError))
}
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpUpdateInsightInput(v *UpdateInsightInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "UpdateInsightInput"}
if v.InsightArn == nil {
invalidParams.Add(smithy.NewErrParamRequired("InsightArn"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpUpdateOrganizationConfigurationInput(v *UpdateOrganizationConfigurationInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "UpdateOrganizationConfigurationInput"}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
func validateOpUpdateStandardsControlInput(v *UpdateStandardsControlInput) error {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "UpdateStandardsControlInput"}
if v.StandardsControlArn == nil {
invalidParams.Add(smithy.NewErrParamRequired("StandardsControlArn"))
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
}
| {
if v == nil {
return nil
}
invalidParams := smithy.InvalidParamsError{Context: "Compliance"}
if v.StatusReasons != nil {
if err := validateStatusReasonsList(v.StatusReasons); err != nil {
invalidParams.AddNested("StatusReasons", err.(smithy.InvalidParamsError))
}
}
if invalidParams.Len() > 0 {
return invalidParams
} else {
return nil
}
} |
socialapp.py | """
Decouples SocialApp client credentials from the database
"""
from django.conf import settings
class SocialAppMixin:
| class Meta:
abstract = True
# Get credentials to be used by OAuth2Client
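# Assumed settings shape (illustrative values only):
# SOCIAL_APPS = {'google': {'id': 1, 'client_id': 'abc', 'secret': 'xyz'}}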
def get_app(self, request):
app = settings.SOCIAL_APPS.get(self.id)
from allauth.socialaccount.models import SocialApp
return SocialApp(
id=app.get('id'),
name='SocialApp instance',
provider=self.id,
client_id=app.get('client_id'),
secret=app.get('secret'),
key=''
) |
|
network_trainer.py | from _warnings import warn
import matplotlib
from batchgenerators.utilities.file_and_folder_operations import *
from sklearn.model_selection import KFold
matplotlib.use("agg")
from time import time, sleep
import torch
import numpy as np
from torch.optim import lr_scheduler
import matplotlib.pyplot as plt
import sys
from collections import OrderedDict
from datetime import datetime
import torch.backends.cudnn as cudnn
from abc import abstractmethod
import os
try:
from apex import amp
except ImportError:
amp = None
class NetworkTrainer(object):
def __init__(self, deterministic=True, fp16=False):
"""
A generic class that can train almost any neural network (RNNs excluded). It provides basic functionality such
as the training loop and tracking of training and validation losses (and the target metric if you implement it).
Training can be terminated early if the validation loss (or the target metric, if implemented) does not improve
anymore. This is based on a moving average (MA) of the loss/metric instead of the raw values, to get smoother
results.
What you need to override:
- __init__
- initialize
- run_online_evaluation (optional)
- finish_online_evaluation (optional)
- validate
- predict_test_case
"""
np.random.seed(12345)
torch.manual_seed(12345)
torch.cuda.manual_seed_all(12345)
self.fp16 = fp16
if deterministic:
cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
else:
cudnn.deterministic = False
torch.backends.cudnn.benchmark = True
################# SET THESE IN self.initialize() ###################################
self.network = None
self.optimizer = None
self.lr_scheduler = None
self.tr_gen = self.val_gen = None
self.was_initialized = False
################# SET THESE IN INIT ################################################
self.output_folder = None
self.fold = None
self.loss = None
self.dataset_directory = None
################# SET THESE IN LOAD_DATASET OR DO_SPLIT ############################
self.dataset = None # these can be None for inference mode
self.dataset_tr = self.dataset_val = None # do not need to be used, they just appear if you are using the suggested load_dataset_and_do_split
################# THESE DO NOT NECESSARILY NEED TO BE MODIFIED #####################
self.patience = 50
self.val_eval_criterion_alpha = 0.9 # alpha * old + (1-alpha) * new
# if this is too low then the moving average will be too noisy and the training may terminate early. If it is
# too high the training will take forever
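# Worked example (illustrative): with alpha = 0.9, a current MA of 0.50 and a new
# value of 0.40, the update gives 0.9 * 0.50 + 0.1 * 0.40 = 0.49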
self.train_loss_MA_alpha = 0.93 # alpha * old + (1-alpha) * new
self.train_loss_MA_eps = 5e-4 # new MA must be at least this much better (smaller)
self.save_every = 50
self.save_latest_only = True
self.max_num_epochs = 1000
self.num_batches_per_epoch = 250
self.num_val_batches_per_epoch = 50
self.also_val_in_tr_mode = False
self.lr_threshold = 1e-6 # the network will not terminate training if the lr is still above this threshold
################# LEAVE THESE ALONE ################################################
self.val_eval_criterion_MA = None
self.train_loss_MA = None
self.best_val_eval_criterion_MA = None
self.best_MA_tr_loss_for_patience = None
self.best_epoch_based_on_MA_tr_loss = None
self.all_tr_losses = []
self.all_val_losses = []
self.all_val_losses_tr_mode = []
self.all_val_eval_metrics = [] # does not have to be used
self.epoch = 0
self.log_file = None
self.deterministic = deterministic
@abstractmethod
def initialize(self, training=True):
"""
create self.output_folder
modify self.output_folder if you are doing cross-validation (one folder per fold)
set self.tr_gen and self.val_gen
set self.network, self.optimizer and self.lr_scheduler
finally set self.was_initialized to True
:param training:
:return:
"""
@abstractmethod
def load_dataset(self):
pass
def do_split(self):
"""
This is a suggestion for if your dataset is a dictionary (my personal standard)
:return:
"""
splits_file = join(self.dataset_directory, "splits_final.pkl")
if not isfile(splits_file):
self.print_to_log_file("Creating new split...")
splits = []
all_keys_sorted = np.sort(list(self.dataset.keys()))
kfold = KFold(n_splits=5, shuffle=True, random_state=12345)
for i, (train_idx, test_idx) in enumerate(kfold.split(all_keys_sorted)):
train_keys = np.array(all_keys_sorted)[train_idx]
test_keys = np.array(all_keys_sorted)[test_idx]
splits.append(OrderedDict())
splits[-1]['train'] = train_keys
splits[-1]['val'] = test_keys
save_pickle(splits, splits_file)
splits = load_pickle(splits_file)
if self.fold == "all":
tr_keys = val_keys = list(self.dataset.keys())
else:
tr_keys = splits[self.fold]['train']
val_keys = splits[self.fold]['val']
tr_keys.sort()
val_keys.sort()
self.dataset_tr = OrderedDict()
for i in tr_keys:
self.dataset_tr[i] = self.dataset[i]
self.dataset_val = OrderedDict()
for i in val_keys:
self.dataset_val[i] = self.dataset[i]
def plot_progress(self):
"""
Should probably be improved
:return:
"""
try:
font = {'weight': 'normal',
'size': 18}
matplotlib.rc('font', **font)
fig = plt.figure(figsize=(30, 24))
ax = fig.add_subplot(111)
ax2 = ax.twinx()
x_values = list(range(self.epoch + 1))
ax.plot(x_values, self.all_tr_losses, color='b', ls='-', label="loss_tr")
ax.plot(x_values, self.all_val_losses, color='r', ls='-', label="loss_val, train=False")
if len(self.all_val_losses_tr_mode) > 0:
ax.plot(x_values, self.all_val_losses_tr_mode, color='g', ls='-', label="loss_val, train=True")
if len(self.all_val_eval_metrics) == len(self.all_val_losses):
ax2.plot(x_values, self.all_val_eval_metrics, color='g', ls='--', label="evaluation metric")
ax.set_xlabel("epoch")
ax.set_ylabel("loss")
ax2.set_ylabel("evaluation metric")
ax.legend()
ax2.legend(loc=9)
fig.savefig(join(self.output_folder, "progress.png"))
plt.close()
except IOError:
self.print_to_log_file("failed to plot: ", sys.exc_info())
def print_to_log_file(self, *args, also_print_to_console=True, add_timestamp=True):
timestamp = time()
dt_object = datetime.fromtimestamp(timestamp)
if add_timestamp:
args = ("%s:" % dt_object, *args)
if self.log_file is None:
maybe_mkdir_p(self.output_folder)
timestamp = datetime.now()
self.log_file = join(self.output_folder, "training_log_%d_%d_%d_%02.0d_%02.0d_%02.0d.txt" %
(timestamp.year, timestamp.month, timestamp.day, timestamp.hour, timestamp.minute, timestamp.second))
with open(self.log_file, 'w') as f:
f.write("Starting... \n")
successful = False
max_attempts = 5
ctr = 0
while not successful and ctr < max_attempts:
try:
with open(self.log_file, 'a+') as f:
for a in args:
f.write(str(a))
f.write(" ")
f.write("\n")
successful = True
except IOError:
print("%s: failed to log: " % datetime.fromtimestamp(timestamp), sys.exc_info())
sleep(0.5)
ctr += 1
if also_print_to_console:
print(*args)
def save_checkpoint(self, fname, save_optimizer=True):
start_time = time()
state_dict = self.network.state_dict()
for key in state_dict.keys():
state_dict[key] = state_dict[key].cpu()
lr_sched_state_dct = None
if self.lr_scheduler is not None and not isinstance(self.lr_scheduler, lr_scheduler.ReduceLROnPlateau):
lr_sched_state_dct = self.lr_scheduler.state_dict()
for key in lr_sched_state_dct.keys():
lr_sched_state_dct[key] = lr_sched_state_dct[key]
if save_optimizer:
optimizer_state_dict = self.optimizer.state_dict()
else:
optimizer_state_dict = None
self.print_to_log_file("saving checkpoint...")
torch.save({
'epoch': self.epoch + 1,
'state_dict': state_dict,
'optimizer_state_dict': optimizer_state_dict,
'lr_scheduler_state_dict': lr_sched_state_dct,
'plot_stuff': (self.all_tr_losses, self.all_val_losses, self.all_val_losses_tr_mode,
self.all_val_eval_metrics)},
fname)
self.print_to_log_file("done, saving took %.2f seconds" % (time() - start_time))
def load_best_checkpoint(self, train=True):
if self.fold is None:
raise RuntimeError("Cannot load best checkpoint if self.fold is None")
self.load_checkpoint(join(self.output_folder, "model_best.model"), train=train)
def load_latest_checkpoint(self, train=True):
if isfile(join(self.output_folder, "model_final_checkpoint.model")):
return self.load_checkpoint(join(self.output_folder, "model_final_checkpoint.model"), train=train)
if isfile(join(self.output_folder, "model_latest.model")):
return self.load_checkpoint(join(self.output_folder, "model_latest.model"), train=train)
all_checkpoints = [i for i in os.listdir(self.output_folder) if i.endswith(".model") and i.find("_ep_") != -1]
if len(all_checkpoints) == 0:
|
corresponding_epochs = [int(i.split("_")[-1].split(".")[0]) for i in all_checkpoints]
checkpoint = all_checkpoints[np.argmax(corresponding_epochs)]
self.load_checkpoint(join(self.output_folder, checkpoint), train=train)
def load_checkpoint(self, fname, train=True):
self.print_to_log_file("loading checkpoint", fname, "train=", train)
if not self.was_initialized:
self.initialize(train)
saved_model = torch.load(fname, map_location=torch.device('cuda', torch.cuda.current_device()))
self.load_checkpoint_ram(saved_model, train)
def load_checkpoint_ram(self, saved_model, train=True):
"""
used for if the checkpoint is already in ram
:param saved_model:
:param train:
:return:
"""
if not self.was_initialized:
self.initialize(train)
new_state_dict = OrderedDict()
curr_state_dict_keys = list(self.network.state_dict().keys())
# if the state dict comes from nn.DataParallel but we use a non-parallel model here then the state dict keys do not
# match. Use a heuristic to make them match
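# e.g. a DataParallel key 'module.conv1.weight' becomes 'conv1.weight';
# key[7:] strips the 7-character 'module.' prefix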
for k, value in saved_model['state_dict'].items():
key = k
if key not in curr_state_dict_keys:
key = key[7:]
new_state_dict[key] = value
self.network.load_state_dict(new_state_dict)
self.epoch = saved_model['epoch']
if train:
optimizer_state_dict = saved_model['optimizer_state_dict']
if optimizer_state_dict is not None:
self.optimizer.load_state_dict(optimizer_state_dict)
if self.lr_scheduler is not None and not isinstance(self.lr_scheduler, lr_scheduler.ReduceLROnPlateau):
self.lr_scheduler.load_state_dict(saved_model['lr_scheduler_state_dict'])
self.all_tr_losses, self.all_val_losses, self.all_val_losses_tr_mode, self.all_val_eval_metrics = saved_model['plot_stuff']
def _maybe_init_amp(self):
# we use fp16 for training only, not inference
if self.fp16:
if amp is not None:
self.network, self.optimizer = amp.initialize(self.network, self.optimizer, opt_level="O1")
else:
self.print_to_log_file("WARNING: FP16 training was requested but nvidia apex is not installed. "
"Install it from https://github.com/NVIDIA/apex")
def run_training(self):
torch.cuda.empty_cache()
self._maybe_init_amp()
if cudnn.benchmark and cudnn.deterministic:
warn("torch.backends.cudnn.deterministic is True indicating a deterministic training is desired. "
"But torch.backends.cudnn.benchmark is True as well and this will prevent deterministic training! "
"If you want deterministic then set benchmark=False")
maybe_mkdir_p(self.output_folder)
if not self.was_initialized:
self.initialize(True)
while self.epoch < self.max_num_epochs:
self.print_to_log_file("\nepoch: ", self.epoch)
epoch_start_time = time()
train_losses_epoch = []
# train one epoch
self.network.train()
for b in range(self.num_batches_per_epoch):
l = self.run_iteration(self.tr_gen, True)
train_losses_epoch.append(l)
self.all_tr_losses.append(np.mean(train_losses_epoch))
self.print_to_log_file("train loss : %.4f" % self.all_tr_losses[-1])
with torch.no_grad():
# validation with train=False
self.network.eval()
val_losses = []
for b in range(self.num_val_batches_per_epoch):
l = self.run_iteration(self.val_gen, False, True)
val_losses.append(l)
self.all_val_losses.append(np.mean(val_losses))
self.print_to_log_file("val loss (train=False): %.4f" % self.all_val_losses[-1])
if self.also_val_in_tr_mode:
self.network.train()
# validation with train=True
val_losses = []
for b in range(self.num_val_batches_per_epoch):
l = self.run_iteration(self.val_gen, False)
val_losses.append(l)
self.all_val_losses_tr_mode.append(np.mean(val_losses))
self.print_to_log_file("val loss (train=True): %.4f" % self.all_val_losses_tr_mode[-1])
epoch_end_time = time()
self.update_train_loss_MA() # needed for lr scheduler and stopping of training
continue_training = self.on_epoch_end()
if not continue_training:
# allows for early stopping
break
self.epoch += 1
self.print_to_log_file("This epoch took %f s\n" % (epoch_end_time-epoch_start_time))
self.save_checkpoint(join(self.output_folder, "model_final_checkpoint.model"))
# now we can delete latest as it will be identical with final
if isfile(join(self.output_folder, "model_latest.model")):
os.remove(join(self.output_folder, "model_latest.model"))
if isfile(join(self.output_folder, "model_latest.model.pkl")):
os.remove(join(self.output_folder, "model_latest.model.pkl"))
def maybe_update_lr(self):
# maybe update learning rate
if self.lr_scheduler is not None:
assert isinstance(self.lr_scheduler, (lr_scheduler.ReduceLROnPlateau, lr_scheduler._LRScheduler))
if isinstance(self.lr_scheduler, lr_scheduler.ReduceLROnPlateau):
# lr scheduler is updated with the moving average train loss. should be more robust
self.lr_scheduler.step(self.train_loss_MA)
else:
self.lr_scheduler.step(self.epoch + 1)
self.print_to_log_file("lr is now (scheduler) %s" % str(self.optimizer.param_groups[0]['lr']))
def maybe_save_checkpoint(self):
"""
Saves a checkpoint every save_every epochs.
:return:
"""
if self.epoch % self.save_every == (self.save_every - 1):
self.print_to_log_file("saving scheduled checkpoint file...")
if not self.save_latest_only:
self.save_checkpoint(join(self.output_folder, "model_ep_%03.0d.model" % (self.epoch + 1)))
self.save_checkpoint(join(self.output_folder, "model_latest.model"))
self.print_to_log_file("done")
def update_eval_criterion_MA(self):
"""
If self.all_val_eval_metrics is unused (len=0) then we fall back to using -self.all_val_losses for the MA to determine early stopping
(the metric is maximized rather than minimized, hence the minus sign on the losses)
:return:
"""
if self.val_eval_criterion_MA is None:
if len(self.all_val_eval_metrics) == 0:
self.val_eval_criterion_MA = - self.all_val_losses[-1]
else:
self.val_eval_criterion_MA = self.all_val_eval_metrics[-1]
else:
if len(self.all_val_eval_metrics) == 0:
"""
We use alpha * old - (1 - alpha) * new here because new in this case is the validation loss and lower
is better, so we need to negate it.
"""
self.val_eval_criterion_MA = self.val_eval_criterion_alpha * self.val_eval_criterion_MA - (
1 - self.val_eval_criterion_alpha) * \
self.all_val_losses[-1]
else:
self.val_eval_criterion_MA = self.val_eval_criterion_alpha * self.val_eval_criterion_MA + (
1 - self.val_eval_criterion_alpha) * \
self.all_val_eval_metrics[-1]
def manage_patience(self):
# update patience
continue_training = True
if self.patience is not None:
# if best_MA_tr_loss_for_patience and best_epoch_based_on_MA_tr_loss were not yet initialized,
# initialize them
if self.best_MA_tr_loss_for_patience is None:
self.best_MA_tr_loss_for_patience = self.train_loss_MA
if self.best_epoch_based_on_MA_tr_loss is None:
self.best_epoch_based_on_MA_tr_loss = self.epoch
if self.best_val_eval_criterion_MA is None:
self.best_val_eval_criterion_MA = self.val_eval_criterion_MA
# check if the current epoch is the best one according to moving average of validation criterion. If so
# then save 'best' model
# Do not use this for validation. This is intended for test set prediction only.
self.print_to_log_file("current best_val_eval_criterion_MA is %.4f0" % self.best_val_eval_criterion_MA)
self.print_to_log_file("current val_eval_criterion_MA is %.4f" % self.val_eval_criterion_MA)
if self.val_eval_criterion_MA > self.best_val_eval_criterion_MA:
self.best_val_eval_criterion_MA = self.val_eval_criterion_MA
self.print_to_log_file("saving best epoch checkpoint...")
self.save_checkpoint(join(self.output_folder, "model_best.model"))
# Now see if the moving average of the train loss has improved. If yes then reset patience, else
# increase patience
if self.train_loss_MA + self.train_loss_MA_eps < self.best_MA_tr_loss_for_patience:
self.best_MA_tr_loss_for_patience = self.train_loss_MA
self.best_epoch_based_on_MA_tr_loss = self.epoch
self.print_to_log_file("New best epoch (train loss MA): %03.4f" % self.best_MA_tr_loss_for_patience)
else:
self.print_to_log_file("No improvement: current train MA %03.4f, best: %03.4f, eps is %03.4f" %
(self.train_loss_MA, self.best_MA_tr_loss_for_patience, self.train_loss_MA_eps))
# if patience has reached its maximum then finish training (provided lr is low enough)
if self.epoch - self.best_epoch_based_on_MA_tr_loss > self.patience:
if self.optimizer.param_groups[0]['lr'] > self.lr_threshold:
self.print_to_log_file("My patience ended, but I believe I need more time (lr > 1e-6)")
self.best_epoch_based_on_MA_tr_loss = self.epoch - self.patience // 2
else:
self.print_to_log_file("My patience ended")
continue_training = False
else:
self.print_to_log_file(
"Patience: %d/%d" % (self.epoch - self.best_epoch_based_on_MA_tr_loss, self.patience))
return continue_training
def on_epoch_end(self):
self.finish_online_evaluation() # does not have to do anything, but can be used to update self.all_val_eval_metrics
self.plot_progress()
self.maybe_update_lr()
self.maybe_save_checkpoint()
self.update_eval_criterion_MA()
continue_training = self.manage_patience()
continue_training = True
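# NOTE: continue_training is forced to True here, so patience-based early stopping
# is effectively disabled and training runs until max_num_epochs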
return continue_training
def update_train_loss_MA(self):
if self.train_loss_MA is None:
self.train_loss_MA = self.all_tr_losses[-1]
else:
self.train_loss_MA = self.train_loss_MA_alpha * self.train_loss_MA + (1 - self.train_loss_MA_alpha) * \
self.all_tr_losses[-1]
def run_iteration(self, data_generator, do_backprop=True, run_online_evaluation=False):
data_dict = next(data_generator)
data = data_dict['data']
target = data_dict['target']
if not isinstance(data, torch.Tensor):
data = torch.from_numpy(data).float()
if not isinstance(target, torch.Tensor):
target = torch.from_numpy(target).float()
data = data.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
self.optimizer.zero_grad()
output = self.network(data)
del data
l = self.loss(output, target)
if run_online_evaluation:
self.run_online_evaluation(output, target)
del target
if do_backprop:
if not self.fp16 or amp is None:
l.backward()
else:
with amp.scale_loss(l, self.optimizer) as scaled_loss:
scaled_loss.backward()
self.optimizer.step()
return l.detach().cpu().numpy()
def run_online_evaluation(self, *args, **kwargs):
"""
Can be implemented, does not have to
:param output_torch:
:param target_npy:
:return:
"""
pass
def finish_online_evaluation(self):
"""
Can be implemented, does not have to
:return:
"""
pass
@abstractmethod
def validate(self, *args, **kwargs):
pass
def find_lr(self, num_iters=1000, init_value=1e-6, final_value=10., beta=0.98):
"""
stolen and adapted from here: https://sgugger.github.io/how-do-you-find-a-good-learning-rate.html
:param num_iters:
:param init_value:
:param final_value:
:param beta:
:return:
"""
import math
self._maybe_init_amp()
mult = (final_value / init_value) ** (1/num_iters)
lr = init_value
self.optimizer.param_groups[0]['lr'] = lr
avg_loss = 0.
best_loss = 0.
losses = []
log_lrs = []
for batch_num in range(1, num_iters + 1):
# +1 because this LR finder is not designed to handle negative losses
loss = self.run_iteration(self.tr_gen, do_backprop=True, run_online_evaluation=False).data.item() + 1
# Compute the smoothed loss
avg_loss = beta * avg_loss + (1-beta) * loss
smoothed_loss = avg_loss / (1 - beta**batch_num)
# Stop if the loss is exploding
if batch_num > 1 and smoothed_loss > 4 * best_loss:
break
# Record the best loss
if smoothed_loss < best_loss or batch_num==1:
best_loss = smoothed_loss
# Store the values
losses.append(smoothed_loss)
log_lrs.append(math.log10(lr))
# Update the lr for the next step
lr *= mult
self.optimizer.param_groups[0]['lr'] = lr
import matplotlib.pyplot as plt
lrs = [10 ** i for i in log_lrs]
fig = plt.figure()
plt.xscale('log')
plt.plot(lrs[10:-5], losses[10:-5])
plt.savefig(join(self.output_folder, "lr_finder.png"))
plt.close()
return log_lrs, losses
| return self.load_best_checkpoint(train=train) |
008-drop_repair_unit_integration_test.go | // Copyright (C) 2017 ScyllaDB
// +build all integration
package migrate
import (
"context"
"testing"
"github.com/scylladb/go-log"
"github.com/scylladb/gocqlx/v2"
"github.com/scylladb/gocqlx/v2/migrate"
. "github.com/scylladb/scylla-manager/pkg/testutils"
"github.com/scylladb/scylla-manager/schema"
)
func TestCreateDefaultRepairTaskForClusterAfter008Integration(t *testing.T) | {
saveRegister()
defer restoreRegister()
session := CreateSessionWithoutMigration(t)
cb := findCallback("008-drop_repair_unit.cql", migrate.AfterMigration)
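// The original callback is wrapped so the test can seed cluster rows before the
// migration callback runs and then assert on the scheduler tasks it creates.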
registerCallback("008-drop_repair_unit.cql", migrate.AfterMigration, func(ctx context.Context, session gocqlx.Session, logger log.Logger) error {
Print("Given: clusters")
const insertClusterCql = `INSERT INTO cluster (id) VALUES (uuid())`
ExecStmt(t, session, insertClusterCql)
ExecStmt(t, session, insertClusterCql)
Print("When: migrate")
if err := cb(ctx, session, logger); err != nil {
t.Fatal(err)
}
Print("Then: tasks are created")
const countSchedulerTask = `SELECT COUNT(*) FROM scheduler_task`
q := session.Query(countSchedulerTask, nil)
defer q.Release()
var count int
if err := q.Scan(&count); err != nil {
t.Fatal(err)
}
if count != 2 {
t.Fatalf("expected 2 scheduler tasks, got %d", count)
}
return nil
})
if err := migrate.FromFS(context.Background(), session, schema.Files); err != nil {
t.Fatal("migrate:", err)
}
} |
|
TradingEpisode.js | exports.newAlgorithmicTradingBotModulesTradingEpisode = function (processIndex) {
/*
This module packages all functions related to Episodes.
*/
let thisObject = {
mantain: mantain,
reset: reset,
openEpisode: openEpisode,
updateExitType: updateExitType,
closeEpisode: closeEpisode,
cycleBasedStatistics: cycleBasedStatistics,
initialize: initialize,
finalize: finalize
}
let tradingEngine
let tradingSystem
let sessionParameters
return thisObject
function initialize() {
tradingSystem = TS.projects.foundations.globals.processVariables.VARIABLES_BY_PROCESS_INDEX_MAP.get(processIndex).SIMULATION_STATE.tradingSystem
tradingEngine = TS.projects.foundations.globals.processVariables.VARIABLES_BY_PROCESS_INDEX_MAP.get(processIndex).SIMULATION_STATE.tradingEngine
sessionParameters = TS.projects.foundations.globals.processConstants.CONSTANTS_BY_PROCESS_INDEX_MAP.get(processIndex).SESSION_NODE.tradingParameters
}
function finalize() {
tradingEngine = undefined
tradingSystem = undefined
sessionParameters = undefined
}
function mantain() {
updateCounters()
updateStatistics()
updateEnds()
updateDistanceToTradingEventsCounters()
}
function reset() {
resetTradingEngineDataStructure()
}
function openEpisode() {
/*
This function is called each time the simulation starts. That does not mean the Episode
must be opened here, since the simulation might be looping at the end of the market,
or the task / session might have been restarted. We only proceed if the episode was never opened before.
*/
if (tradingEngine.tradingCurrent.tradingEpisode.begin.value !== tradingEngine.tradingCurrent.tradingEpisode.begin.config.initialValue) { return }
/* Starting begin and end */
tradingEngine.tradingCurrent.tradingEpisode.begin.value = tradingEngine.tradingCurrent.tradingEpisode.candle.begin.value
tradingEngine.tradingCurrent.tradingEpisode.end.value = tradingEngine.tradingCurrent.tradingEpisode.candle.end.value
/* Getting the begin Balance from the session configuration */
tradingEngine.tradingCurrent.tradingEpisode.episodeBaseAsset.beginBalance.value = sessionParameters.sessionBaseAsset.config.initialBalance
tradingEngine.tradingCurrent.tradingEpisode.episodeQuotedAsset.beginBalance.value = sessionParameters.sessionQuotedAsset.config.initialBalance
/* The current balance is also the begin balance, that is how this starts. */
tradingEngine.tradingCurrent.tradingEpisode.episodeBaseAsset.balance.value = sessionParameters.sessionBaseAsset.config.initialBalance
tradingEngine.tradingCurrent.tradingEpisode.episodeQuotedAsset.balance.value = sessionParameters.sessionQuotedAsset.config.initialBalance
/* Recording the opening at the Trading Engine Data Structure */
tradingEngine.tradingCurrent.tradingEpisode.status.value = 'Open'
tradingEngine.tradingCurrent.tradingEpisode.serialNumber.value = 1
tradingEngine.tradingCurrent.tradingEpisode.identifier.value = SA.projects.foundations.utilities.miscellaneousFunctions.genereteUniqueId()
tradingEngine.tradingCurrent.tradingEpisode.beginRate.value = tradingEngine.tradingCurrent.tradingEpisode.candle.close.value
}
function updateExitType(exitType) {
tradingEngine.tradingCurrent.tradingEpisode.exitType.value = exitType
}
function closeEpisode() {
tradingEngine.tradingCurrent.tradingEpisode.status.value = 'Closed'
tradingEngine.tradingCurrent.tradingEpisode.end.value = tradingEngine.tradingCurrent.tradingEpisode.candle.end.value
tradingEngine.tradingCurrent.tradingEpisode.endRate.value = tradingEngine.tradingCurrent.tradingEpisode.candle.close.value
tradingEngine.tradingCurrent.tradingEpisode.episodeBaseAsset.endBalance.value = tradingEngine.tradingCurrent.tradingEpisode.episodeBaseAsset.balance.value
tradingEngine.tradingCurrent.tradingEpisode.episodeQuotedAsset.endBalance.value = tradingEngine.tradingCurrent.tradingEpisode.episodeQuotedAsset.balance.value
}
function resetEpisode() {
TS.projects.foundations.globals.processModuleObjects.MODULE_OBJECTS_BY_PROCESS_INDEX_MAP.get(processIndex).TRADING_ENGINE_MODULE_OBJECT.initializeNode(tradingEngine.tradingCurrent.tradingEpisode)
}
function updateEnds() {
if (tradingEngine.tradingCurrent.tradingEpisode.status.value === 'Open') {
tradingEngine.tradingCurrent.tradingEpisode.end.value = tradingEngine.tradingCurrent.tradingEpisode.end.value + sessionParameters.timeFrame.config.value
tradingEngine.tradingCurrent.tradingEpisode.endRate.value = tradingEngine.tradingCurrent.tradingEpisode.candle.close.value
tradingEngine.tradingCurrent.tradingEpisode.episodeBaseAsset.endBalance.value = tradingEngine.tradingCurrent.tradingEpisode.episodeBaseAsset.balance.value
tradingEngine.tradingCurrent.tradingEpisode.episodeQuotedAsset.endBalance.value = tradingEngine.tradingCurrent.tradingEpisode.episodeQuotedAsset.balance.value
}
}
function resetTradingEngineDataStructure() {
if (tradingEngine.tradingCurrent.tradingEpisode.status.value === 'Closed') {
resetEpisode()
}
}
function updateCounters() {
if (tradingEngine.tradingCurrent.tradingEpisode.status.value === 'Open') {
tradingEngine.tradingCurrent.tradingEpisode.tradingEpisodeCounters.periods.value++
}
}
function updateStatistics() {
/* Days Calculation */
tradingEngine.tradingCurrent.tradingEpisode.tradingEpisodeStatistics.days.value =
tradingEngine.tradingCurrent.tradingEpisode.tradingEpisodeCounters.periods.value *
sessionParameters.timeFrame.config.value /
SA.projects.foundations.globals.timeConstants.ONE_DAY_IN_MILISECONDS
}
function cycleBasedStatistics() {
calculateAssetsStatistics()
calculateEpisodeStatistics()
function calculateAssetsStatistics() {
/* Updating Profit Loss */
tradingEngine.tradingCurrent.tradingEpisode.episodeBaseAsset.profitLoss.value =
tradingEngine.tradingCurrent.tradingEpisode.episodeBaseAsset.balance.value -
sessionParameters.sessionBaseAsset.config.initialBalance
tradingEngine.tradingCurrent.tradingEpisode.episodeQuotedAsset.profitLoss.value =
tradingEngine.tradingCurrent.tradingEpisode.episodeQuotedAsset.balance.value -
sessionParameters.sessionQuotedAsset.config.initialBalance
tradingEngine.tradingCurrent.tradingEpisode.episodeBaseAsset.profitLoss.value = TS.projects.foundations.utilities.miscellaneousFunctions.truncateToThisPrecision(tradingEngine.tradingCurrent.tradingEpisode.episodeBaseAsset.profitLoss.value, 10)
tradingEngine.tradingCurrent.tradingEpisode.episodeQuotedAsset.profitLoss.value = TS.projects.foundations.utilities.miscellaneousFunctions.truncateToThisPrecision(tradingEngine.tradingCurrent.tradingEpisode.episodeQuotedAsset.profitLoss.value, 10)
/*
Updating ROI
https://www.investopedia.com/articles/basics/10/guide-to-calculating-roi.asp
*/
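/* Illustrative numbers: a profitLoss of 5 on a beginBalance of 100 gives ROI = 5 / 100 * 100 = 5 (%). */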
tradingEngine.tradingCurrent.tradingEpisode.episodeBaseAsset.ROI.value =
tradingEngine.tradingCurrent.tradingEpisode.episodeBaseAsset.profitLoss.value /
tradingEngine.tradingCurrent.tradingEpisode.episodeBaseAsset.beginBalance.value * 100
tradingEngine.tradingCurrent.tradingEpisode.episodeQuotedAsset.ROI.value =
tradingEngine.tradingCurrent.tradingEpisode.episodeQuotedAsset.profitLoss.value /
tradingEngine.tradingCurrent.tradingEpisode.episodeQuotedAsset.beginBalance.value * 100
tradingEngine.tradingCurrent.tradingEpisode.episodeBaseAsset.ROI.value = TS.projects.foundations.utilities.miscellaneousFunctions.truncateToThisPrecision(tradingEngine.tradingCurrent.tradingEpisode.episodeBaseAsset.ROI.value, 10)
tradingEngine.tradingCurrent.tradingEpisode.episodeQuotedAsset.ROI.value = TS.projects.foundations.utilities.miscellaneousFunctions.truncateToThisPrecision(tradingEngine.tradingCurrent.tradingEpisode.episodeQuotedAsset.ROI.value, 10)
/* Updating Hit Ratio */
if (tradingEngine.tradingCurrent.tradingEpisode.tradingEpisodeCounters.positions.value > 0) {
tradingEngine.tradingCurrent.tradingEpisode.episodeBaseAsset.hitRatio.value =
tradingEngine.tradingCurrent.tradingEpisode.episodeBaseAsset.hits.value /
tradingEngine.tradingCurrent.tradingEpisode.tradingEpisodeCounters.positions.value
tradingEngine.tradingCurrent.tradingEpisode.episodeQuotedAsset.hitRatio.value =
tradingEngine.tradingCurrent.tradingEpisode.episodeQuotedAsset.hits.value /
tradingEngine.tradingCurrent.tradingEpisode.tradingEpisodeCounters.positions.value
tradingEngine.tradingCurrent.tradingEpisode.episodeBaseAsset.hitRatio.value = TS.projects.foundations.utilities.miscellaneousFunctions.truncateToThisPrecision(tradingEngine.tradingCurrent.tradingEpisode.episodeBaseAsset.hitRatio.value, 10)
tradingEngine.tradingCurrent.tradingEpisode.episodeQuotedAsset.hitRatio.value = TS.projects.foundations.utilities.miscellaneousFunctions.truncateToThisPrecision(tradingEngine.tradingCurrent.tradingEpisode.episodeQuotedAsset.hitRatio.value, 10)
}
/*
Updating Annualized Rate Of Return
https://www.investopedia.com/terms/a/annualized-rate.asp
*/
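/*
Illustrative numbers: beginBalance 100 and profitLoss 10 over 30 days give
(110 / 100) ^ (365 / 30) - 1 ≈ 2.19, i.e. roughly a 219% annualized return.
*/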
tradingEngine.tradingCurrent.tradingEpisode.episodeBaseAsset.annualizedRateOfReturn.value =
Math.pow( | ,
(365 / tradingEngine.tradingCurrent.tradingEpisode.tradingEpisodeStatistics.days.value)
) - 1
tradingEngine.tradingCurrent.tradingEpisode.episodeQuotedAsset.annualizedRateOfReturn.value =
Math.pow(
(
tradingEngine.tradingCurrent.tradingEpisode.episodeQuotedAsset.beginBalance.value +
tradingEngine.tradingCurrent.tradingEpisode.episodeQuotedAsset.profitLoss.value
) / tradingEngine.tradingCurrent.tradingEpisode.episodeQuotedAsset.beginBalance.value
,
(365 / tradingEngine.tradingCurrent.tradingEpisode.tradingEpisodeStatistics.days.value)
) - 1
tradingEngine.tradingCurrent.tradingEpisode.episodeBaseAsset.annualizedRateOfReturn.value = TS.projects.foundations.utilities.miscellaneousFunctions.truncateToThisPrecision(tradingEngine.tradingCurrent.tradingEpisode.episodeBaseAsset.annualizedRateOfReturn.value, 10)
tradingEngine.tradingCurrent.tradingEpisode.episodeQuotedAsset.annualizedRateOfReturn.value = TS.projects.foundations.utilities.miscellaneousFunctions.truncateToThisPrecision(tradingEngine.tradingCurrent.tradingEpisode.episodeQuotedAsset.annualizedRateOfReturn.value, 10)
/* Updating Hit or Fail */
if (tradingEngine.tradingCurrent.tradingEpisode.episodeBaseAsset.profitLoss.value > 0) {
tradingEngine.tradingCurrent.tradingEpisode.episodeBaseAsset.hitFail.value = 'Hit'
} else {
tradingEngine.tradingCurrent.tradingEpisode.episodeBaseAsset.hitFail.value = 'Fail'
}
if (tradingEngine.tradingCurrent.tradingEpisode.episodeQuotedAsset.profitLoss.value > 0) {
tradingEngine.tradingCurrent.tradingEpisode.episodeQuotedAsset.hitFail.value = 'Hit'
} else {
tradingEngine.tradingCurrent.tradingEpisode.episodeQuotedAsset.hitFail.value = 'Fail'
}
}
function calculateEpisodeStatistics() {
/* Updating Profit Loss */
tradingEngine.tradingCurrent.tradingEpisode.tradingEpisodeStatistics.profitLoss.value =
tradingEngine.tradingCurrent.tradingEpisode.episodeBaseAsset.profitLoss.value * tradingEngine.tradingCurrent.tradingEpisode.candle.close.value +
tradingEngine.tradingCurrent.tradingEpisode.episodeQuotedAsset.profitLoss.value
tradingEngine.tradingCurrent.tradingEpisode.tradingEpisodeStatistics.profitLoss.value = TS.projects.foundations.utilities.miscellaneousFunctions.truncateToThisPrecision(tradingEngine.tradingCurrent.tradingEpisode.tradingEpisodeStatistics.profitLoss.value, 10)
/*
Updating ROI
https://www.investopedia.com/articles/basics/10/guide-to-calculating-roi.asp
*/
tradingEngine.tradingCurrent.tradingEpisode.tradingEpisodeStatistics.ROI.value =
(
tradingEngine.tradingCurrent.tradingEpisode.episodeBaseAsset.profitLoss.value * tradingEngine.tradingCurrent.tradingEpisode.endRate.value +
tradingEngine.tradingCurrent.tradingEpisode.episodeQuotedAsset.profitLoss.value
) / (
tradingEngine.tradingCurrent.tradingEpisode.episodeBaseAsset.beginBalance.value * tradingEngine.tradingCurrent.tradingEpisode.beginRate.value +
tradingEngine.tradingCurrent.tradingEpisode.episodeQuotedAsset.beginBalance.value
) * 100
tradingEngine.tradingCurrent.tradingEpisode.tradingEpisodeStatistics.ROI.value = TS.projects.foundations.utilities.miscellaneousFunctions.truncateToThisPrecision(tradingEngine.tradingCurrent.tradingEpisode.tradingEpisodeStatistics.ROI.value, 10)
/*
Updating Annualized Rate Of Return
https://www.investopedia.com/terms/a/annualized-rate.asp
*/
tradingEngine.tradingCurrent.tradingEpisode.tradingEpisodeStatistics.annualizedRateOfReturn.value =
Math.pow(
(
tradingEngine.tradingCurrent.tradingEpisode.episodeBaseAsset.beginBalance.value * tradingEngine.tradingCurrent.tradingEpisode.beginRate.value +
tradingEngine.tradingCurrent.tradingEpisode.episodeQuotedAsset.beginBalance.value +
tradingEngine.tradingCurrent.tradingEpisode.episodeBaseAsset.profitLoss.value +
tradingEngine.tradingCurrent.tradingEpisode.episodeQuotedAsset.profitLoss.value
) /
(
tradingEngine.tradingCurrent.tradingEpisode.episodeBaseAsset.beginBalance.value * tradingEngine.tradingCurrent.tradingEpisode.beginRate.value +
tradingEngine.tradingCurrent.tradingEpisode.episodeQuotedAsset.beginBalance.value
)
,
(365 / tradingEngine.tradingCurrent.tradingEpisode.tradingEpisodeStatistics.days.value)
) - 1
tradingEngine.tradingCurrent.tradingEpisode.tradingEpisodeStatistics.annualizedRateOfReturn.value = TS.projects.foundations.utilities.miscellaneousFunctions.truncateToThisPrecision(tradingEngine.tradingCurrent.tradingEpisode.tradingEpisodeStatistics.annualizedRateOfReturn.value, 10)
/* Updating Hit or Fail */
if (tradingEngine.tradingCurrent.tradingEpisode.tradingEpisodeStatistics.profitLoss.value > 0) {
tradingEngine.tradingCurrent.tradingEpisode.tradingEpisodeStatistics.hitFail.value = 'Hit'
} else {
tradingEngine.tradingCurrent.tradingEpisode.tradingEpisodeStatistics.hitFail.value = 'Fail'
}
}
}
function updateDistanceToTradingEventsCounters() {
/* Keeping distance counters up to date while avoiding counting before the first event happens. */
if (
tradingEngine.tradingCurrent.tradingEpisode.distanceToTradingEvent.triggerOn.value > 0
) {
tradingEngine.tradingCurrent.tradingEpisode.distanceToTradingEvent.triggerOn.value++
}
if (
tradingEngine.tradingCurrent.tradingEpisode.distanceToTradingEvent.triggerOff.value > 0
) {
tradingEngine.tradingCurrent.tradingEpisode.distanceToTradingEvent.triggerOff.value++
}
if (
tradingEngine.tradingCurrent.tradingEpisode.distanceToTradingEvent.takePosition.value > 0
) {
tradingEngine.tradingCurrent.tradingEpisode.distanceToTradingEvent.takePosition.value++
}
if (
tradingEngine.tradingCurrent.tradingEpisode.distanceToTradingEvent.closePosition.value > 0
) {
tradingEngine.tradingCurrent.tradingEpisode.distanceToTradingEvent.closePosition.value++
}
if (
tradingEngine.tradingCurrent.tradingEpisode.distanceToTradingEvent.nextPhase.value > 0
) {
tradingEngine.tradingCurrent.tradingEpisode.distanceToTradingEvent.nextPhase.value++
}
if (
tradingEngine.tradingCurrent.tradingEpisode.distanceToTradingEvent.moveToPhase.value > 0
) {
tradingEngine.tradingCurrent.tradingEpisode.distanceToTradingEvent.moveToPhase.value++
}
if (
tradingEngine.tradingCurrent.tradingEpisode.distanceToTradingEvent.createOrder.value > 0
) {
tradingEngine.tradingCurrent.tradingEpisode.distanceToTradingEvent.createOrder.value++
}
if (
tradingEngine.tradingCurrent.tradingEpisode.distanceToTradingEvent.cancelOrder.value > 0
) {
tradingEngine.tradingCurrent.tradingEpisode.distanceToTradingEvent.cancelOrder.value++
}
if (
tradingEngine.tradingCurrent.tradingEpisode.distanceToTradingEvent.closeOrder.value > 0
) {
tradingEngine.tradingCurrent.tradingEpisode.distanceToTradingEvent.closeOrder.value++
}
}
} | (
tradingEngine.tradingCurrent.tradingEpisode.episodeBaseAsset.beginBalance.value +
tradingEngine.tradingCurrent.tradingEpisode.episodeBaseAsset.profitLoss.value
) / tradingEngine.tradingCurrent.tradingEpisode.episodeBaseAsset.beginBalance.value |
source_util.rs | use syntax::{ast, panictry};
use syntax::ext::base::{self, *};
use syntax::parse::{self, token, DirectoryOwnership};
use syntax::print::pprust;
use syntax::ptr::P;
use syntax::symbol::Symbol;
use syntax::tokenstream::TokenStream;
use syntax::early_buffered_lints::BufferedEarlyLintId;
use smallvec::SmallVec;
use syntax_pos::{self, Pos, Span};
use rustc_data_structures::sync::Lrc;
// These macros all relate to the file system; they either return
// the column/row/filename of the expression, or they include
// a given file into the current one.
/// line!(): expands to the current line number
pub fn expand_line(cx: &mut ExtCtxt<'_>, sp: Span, tts: TokenStream)
-> Box<dyn base::MacResult+'static> {
base::check_zero_tts(cx, sp, tts, "line!");
let topmost = cx.expansion_cause().unwrap_or(sp);
let loc = cx.source_map().lookup_char_pos(topmost.lo());
base::MacEager::expr(cx.expr_u32(topmost, loc.line as u32))
}
/// column!(): expands to the current column number
pub fn expand_column(cx: &mut ExtCtxt<'_>, sp: Span, tts: TokenStream)
-> Box<dyn base::MacResult+'static> {
base::check_zero_tts(cx, sp, tts, "column!");
let topmost = cx.expansion_cause().unwrap_or(sp);
let loc = cx.source_map().lookup_char_pos(topmost.lo());
base::MacEager::expr(cx.expr_u32(topmost, loc.col.to_usize() as u32 + 1))
}
/// file!(): expands to the current filename
/// The source_file (`loc.file`) contains a bunch more information we could spit
/// out if we wanted.
pub fn expand_file(cx: &mut ExtCtxt<'_>, sp: Span, tts: TokenStream)
-> Box<dyn base::MacResult+'static> {
base::check_zero_tts(cx, sp, tts, "file!");
let topmost = cx.expansion_cause().unwrap_or(sp);
let loc = cx.source_map().lookup_char_pos(topmost.lo());
base::MacEager::expr(cx.expr_str(topmost, Symbol::intern(&loc.file.name.to_string())))
}
pub fn expand_stringify(cx: &mut ExtCtxt<'_>, sp: Span, tts: TokenStream)
-> Box<dyn base::MacResult+'static> {
let s = pprust::tts_to_string(tts);
base::MacEager::expr(cx.expr_str(sp, Symbol::intern(&s)))
}
pub fn expand_mod(cx: &mut ExtCtxt<'_>, sp: Span, tts: TokenStream)
-> Box<dyn base::MacResult+'static> {
base::check_zero_tts(cx, sp, tts, "module_path!");
let mod_path = &cx.current_expansion.module.mod_path;
let string = mod_path.iter().map(|x| x.to_string()).collect::<Vec<String>>().join("::");
base::MacEager::expr(cx.expr_str(sp, Symbol::intern(&string)))
}
/// include! : parse the given file as an expr
/// This is generally a bad idea because it's going to behave
/// unhygienically.
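/// A typical (illustrative) use: `let x: u32 = include!("x_value.in");`, where the
/// included file contains a single expression such as `42`.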
pub fn expand_include<'cx>(cx: &'cx mut ExtCtxt<'_>, sp: Span, tts: TokenStream)
-> Box<dyn base::MacResult+'cx> {
let file = match get_single_str_from_tts(cx, sp, tts, "include!") {
Some(f) => f,
None => return DummyResult::any(sp),
};
// The file will be added to the code map by the parser
let file = cx.resolve_path(file, sp);
let directory_ownership = DirectoryOwnership::Owned { relative: None };
let p = parse::new_sub_parser_from_file(cx.parse_sess(), &file, directory_ownership, None, sp);
struct ExpandResult<'a> {
p: parse::parser::Parser<'a>,
}
impl<'a> base::MacResult for ExpandResult<'a> {
fn make_expr(mut self: Box<ExpandResult<'a>>) -> Option<P<ast::Expr>> {
let r = panictry!(self.p.parse_expr());
if self.p.token != token::Eof {
self.p.sess.buffer_lint(
BufferedEarlyLintId::IncompleteInclude,
self.p.token.span,
ast::CRATE_NODE_ID,
"include macro expected single expression in source",
);
}
Some(r)
}
fn make_items(mut self: Box<ExpandResult<'a>>) -> Option<SmallVec<[P<ast::Item>; 1]>> {
let mut ret = SmallVec::new();
while self.p.token != token::Eof {
match panictry!(self.p.parse_item()) {
Some(item) => ret.push(item),
None => self.p.sess.span_diagnostic.span_fatal(self.p.token.span,
&format!("expected item, found `{}`",
self.p.this_token_to_string()))
.raise()
}
}
Some(ret)
}
}
Box::new(ExpandResult { p })
}
// include_str! : read the given file, insert it as a literal string expr
pub fn expand_include_str(cx: &mut ExtCtxt<'_>, sp: Span, tts: TokenStream)
-> Box<dyn base::MacResult+'static> {
let file = match get_single_str_from_tts(cx, sp, tts, "include_str!") {
Some(f) => f,
None => return DummyResult::any(sp)
};
let file = cx.resolve_path(file, sp);
match cx.source_map().load_binary_file(&file) {
Ok(bytes) => match std::str::from_utf8(&bytes) {
Ok(src) => {
let interned_src = Symbol::intern(&src);
base::MacEager::expr(cx.expr_str(sp, interned_src))
}
Err(_) => {
cx.span_err(sp, &format!("{} wasn't a utf-8 file", file.display()));
DummyResult::any(sp)
}
},
Err(e) => {
cx.span_err(sp, &format!("couldn't read {}: {}", file.display(), e));
DummyResult::any(sp)
}
}
}
pub fn expand_include_bytes(cx: &mut ExtCtxt<'_>, sp: Span, tts: TokenStream)
-> Box<dyn base::MacResult+'static> | {
let file = match get_single_str_from_tts(cx, sp, tts, "include_bytes!") {
Some(f) => f,
None => return DummyResult::any(sp)
};
let file = cx.resolve_path(file, sp);
match cx.source_map().load_binary_file(&file) {
Ok(bytes) => {
base::MacEager::expr(cx.expr_lit(sp, ast::LitKind::ByteStr(Lrc::new(bytes))))
},
Err(e) => {
cx.span_err(sp, &format!("couldn't read {}: {}", file.display(), e));
DummyResult::any(sp)
}
}
} |
|
resttest.go | /*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resttest
import (
"fmt"
"reflect"
"strings"
"testing"
"time"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/api/validation/path"
metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/apiserver/pkg/registry/rest"
)
type Tester struct {
*testing.T
storage rest.Storage
clusterScope bool
createOnUpdate bool
generatesName bool
returnDeletedObject bool
namer func(int) string
}
func | (t *testing.T, storage rest.Storage) *Tester {
return &Tester{
T: t,
storage: storage,
namer: defaultNamer,
}
}
func defaultNamer(i int) string {
return fmt.Sprintf("foo%d", i)
}
// Namer allows providing a custom name maker
// By default "foo%d" is used
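// e.g. (illustrative): tester.Namer(func(i int) string { return fmt.Sprintf("pod%d", i) })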
func (t *Tester) Namer(namer func(int) string) *Tester {
t.namer = namer
return t
}
func (t *Tester) ClusterScope() *Tester {
t.clusterScope = true
return t
}
func (t *Tester) AllowCreateOnUpdate() *Tester {
t.createOnUpdate = true
return t
}
func (t *Tester) GeneratesName() *Tester {
t.generatesName = true
return t
}
func (t *Tester) ReturnDeletedObject() *Tester {
t.returnDeletedObject = true
return t
}
// TestNamespace returns the namespace that will be used when creating contexts.
// Returns NamespaceNone for cluster-scoped objects.
func (t *Tester) TestNamespace() string {
if t.clusterScope {
return metav1.NamespaceNone
}
return "test"
}
// TestContext returns a namespaced context that will be used when making storage calls.
// Namespace is determined by TestNamespace()
func (t *Tester) TestContext() genericapirequest.Context {
if t.clusterScope {
return genericapirequest.NewContext()
}
return genericapirequest.WithNamespace(genericapirequest.NewContext(), t.TestNamespace())
}
func (t *Tester) getObjectMetaOrFail(obj runtime.Object) metav1.Object {
objMeta, err := meta.Accessor(obj)
if err != nil {
t.Fatalf("object does not have ObjectMeta: %v\n%#v", err, obj)
}
return objMeta
}
func (t *Tester) setObjectMeta(obj runtime.Object, name string) {
meta := t.getObjectMetaOrFail(obj)
meta.SetName(name)
if t.clusterScope {
meta.SetNamespace(metav1.NamespaceNone)
} else {
meta.SetNamespace(genericapirequest.NamespaceValue(t.TestContext()))
}
meta.SetGenerateName("")
meta.SetGeneration(1)
}
type AssignFunc func([]runtime.Object) []runtime.Object
type EmitFunc func(runtime.Object, string) error
type GetFunc func(genericapirequest.Context, runtime.Object) (runtime.Object, error)
type InitWatchFunc func()
type InjectErrFunc func(err error)
type IsErrorFunc func(err error) bool
type CreateFunc func(genericapirequest.Context, runtime.Object) error
type SetRVFunc func(uint64)
type UpdateFunc func(runtime.Object) runtime.Object
// Test creating an object.
func (t *Tester) TestCreate(valid runtime.Object, createFn CreateFunc, getFn GetFunc, invalid ...runtime.Object) {
t.testCreateHasMetadata(valid.DeepCopyObject())
if !t.generatesName {
t.testCreateGeneratesName(valid.DeepCopyObject())
}
t.testCreateEquals(valid.DeepCopyObject(), getFn)
t.testCreateAlreadyExisting(valid.DeepCopyObject(), createFn)
if t.clusterScope {
t.testCreateDiscardsObjectNamespace(valid.DeepCopyObject())
t.testCreateIgnoresContextNamespace(valid.DeepCopyObject())
t.testCreateIgnoresMismatchedNamespace(valid.DeepCopyObject())
t.testCreateResetsUserData(valid.DeepCopyObject())
} else {
t.testCreateRejectsMismatchedNamespace(valid.DeepCopyObject())
}
t.testCreateInvokesValidation(invalid...)
t.testCreateValidatesNames(valid.DeepCopyObject())
t.testCreateIgnoreClusterName(valid.DeepCopyObject())
}
// Test updating an object.
func (t *Tester) TestUpdate(valid runtime.Object, createFn CreateFunc, getFn GetFunc, updateFn UpdateFunc, invalidUpdateFn ...UpdateFunc) {
t.testUpdateEquals(valid.DeepCopyObject(), createFn, getFn, updateFn)
t.testUpdateFailsOnVersionTooOld(valid.DeepCopyObject(), createFn, getFn)
t.testUpdateOnNotFound(valid.DeepCopyObject())
if !t.clusterScope {
t.testUpdateRejectsMismatchedNamespace(valid.DeepCopyObject(), createFn, getFn)
}
t.testUpdateInvokesValidation(valid.DeepCopyObject(), createFn, invalidUpdateFn...)
t.testUpdateWithWrongUID(valid.DeepCopyObject(), createFn, getFn)
t.testUpdateRetrievesOldObject(valid.DeepCopyObject(), createFn, getFn)
t.testUpdatePropagatesUpdatedObjectError(valid.DeepCopyObject(), createFn, getFn)
t.testUpdateIgnoreGenerationUpdates(valid.DeepCopyObject(), createFn, getFn)
t.testUpdateIgnoreClusterName(valid.DeepCopyObject(), createFn, getFn)
}
// Test deleting an object.
func (t *Tester) TestDelete(valid runtime.Object, createFn CreateFunc, getFn GetFunc, isNotFoundFn IsErrorFunc) {
t.testDeleteNonExist(valid.DeepCopyObject())
t.testDeleteNoGraceful(valid.DeepCopyObject(), createFn, getFn, isNotFoundFn)
t.testDeleteWithUID(valid.DeepCopyObject(), createFn, getFn, isNotFoundFn)
}
// Test gracefully deleting an object.
func (t *Tester) TestDeleteGraceful(valid runtime.Object, createFn CreateFunc, getFn GetFunc, expectedGrace int64) {
t.testDeleteGracefulHasDefault(valid.DeepCopyObject(), createFn, getFn, expectedGrace)
t.testDeleteGracefulWithValue(valid.DeepCopyObject(), createFn, getFn, expectedGrace)
t.testDeleteGracefulUsesZeroOnNil(valid.DeepCopyObject(), createFn, expectedGrace)
t.testDeleteGracefulExtend(valid.DeepCopyObject(), createFn, getFn, expectedGrace)
t.testDeleteGracefulShorten(valid.DeepCopyObject(), createFn, getFn, expectedGrace)
t.testDeleteGracefulImmediate(valid.DeepCopyObject(), createFn, getFn, expectedGrace)
}
// Test getting object.
func (t *Tester) TestGet(valid runtime.Object) {
t.testGetFound(valid.DeepCopyObject())
t.testGetNotFound(valid.DeepCopyObject())
t.testGetMimatchedNamespace(valid.DeepCopyObject())
if !t.clusterScope {
t.testGetDifferentNamespace(valid.DeepCopyObject())
}
}
// Test listing objects.
func (t *Tester) TestList(valid runtime.Object, assignFn AssignFunc) {
t.testListNotFound(assignFn)
t.testListFound(valid.DeepCopyObject(), assignFn)
t.testListMatchLabels(valid.DeepCopyObject(), assignFn)
t.testListTableConversion(valid.DeepCopyObject(), assignFn)
}
// Test watching objects.
func (t *Tester) TestWatch(
valid runtime.Object, emitFn EmitFunc,
labelsPass, labelsFail []labels.Set, fieldsPass, fieldsFail []fields.Set, actions []string) {
t.testWatchLabels(valid.DeepCopyObject(), emitFn, labelsPass, labelsFail, actions)
t.testWatchFields(valid.DeepCopyObject(), emitFn, fieldsPass, fieldsFail, actions)
}
// =============================================================================
// Creation tests.
func (t *Tester) delete(ctx genericapirequest.Context, obj runtime.Object) error {
objectMeta, err := meta.Accessor(obj)
if err != nil {
return err
}
deleter, ok := t.storage.(rest.GracefulDeleter)
if !ok {
return fmt.Errorf("Expected deleting storage, got %v", t.storage)
}
_, _, err = deleter.Delete(ctx, objectMeta.GetName(), nil)
return err
}
func (t *Tester) testCreateAlreadyExisting(obj runtime.Object, createFn CreateFunc) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
t.setObjectMeta(foo, t.namer(1))
if err := createFn(ctx, foo); err != nil {
t.Errorf("unexpected error: %v", err)
}
defer t.delete(ctx, foo)
_, err := t.storage.(rest.Creater).Create(ctx, foo, rest.ValidateAllObjectFunc, false)
if !errors.IsAlreadyExists(err) {
t.Errorf("expected already exists err, got %v", err)
}
}
func (t *Tester) testCreateEquals(obj runtime.Object, getFn GetFunc) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
t.setObjectMeta(foo, t.namer(2))
created, err := t.storage.(rest.Creater).Create(ctx, foo, rest.ValidateAllObjectFunc, false)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
defer t.delete(ctx, created)
got, err := getFn(ctx, foo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
// Set resource version which might be unset in created object.
createdMeta := t.getObjectMetaOrFail(created)
gotMeta := t.getObjectMetaOrFail(got)
createdMeta.SetResourceVersion(gotMeta.GetResourceVersion())
if e, a := created, got; !apiequality.Semantic.DeepEqual(e, a) {
t.Errorf("unexpected obj: %#v, expected %#v", e, a)
}
}
func (t *Tester) testCreateDiscardsObjectNamespace(valid runtime.Object) {
objectMeta := t.getObjectMetaOrFail(valid)
// Ignore non-empty namespace in object meta
objectMeta.SetNamespace("not-default")
// Ideally, we'd get an error back here, but at least verify the namespace wasn't persisted
created, err := t.storage.(rest.Creater).Create(t.TestContext(), valid.DeepCopyObject(), rest.ValidateAllObjectFunc, false)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer t.delete(t.TestContext(), created)
createdObjectMeta := t.getObjectMetaOrFail(created)
if createdObjectMeta.GetNamespace() != metav1.NamespaceNone {
t.Errorf("Expected empty namespace on created object, got '%v'", createdObjectMeta.GetNamespace())
}
}
func (t *Tester) testCreateGeneratesName(valid runtime.Object) {
objectMeta := t.getObjectMetaOrFail(valid)
objectMeta.SetName("")
objectMeta.SetGenerateName("test-")
created, err := t.storage.(rest.Creater).Create(t.TestContext(), valid, rest.ValidateAllObjectFunc, false)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer t.delete(t.TestContext(), created)
if objectMeta.GetName() == "test-" || !strings.HasPrefix(objectMeta.GetName(), "test-") {
t.Errorf("unexpected name: %#v", valid)
}
}
func (t *Tester) testCreateHasMetadata(valid runtime.Object) {
objectMeta := t.getObjectMetaOrFail(valid)
objectMeta.SetName(t.namer(1))
objectMeta.SetNamespace(t.TestNamespace())
obj, err := t.storage.(rest.Creater).Create(t.TestContext(), valid, rest.ValidateAllObjectFunc, false)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if obj == nil {
t.Fatalf("Unexpected object from result: %#v", obj)
}
defer t.delete(t.TestContext(), obj)
if !metav1.HasObjectMetaSystemFieldValues(objectMeta) {
t.Errorf("storage did not populate object meta field values")
}
}
func (t *Tester) testCreateIgnoresContextNamespace(valid runtime.Object) {
// Ignore non-empty namespace in context
ctx := genericapirequest.WithNamespace(genericapirequest.NewContext(), "not-default2")
// Ideally, we'd get an error back here, but at least verify the namespace wasn't persisted
created, err := t.storage.(rest.Creater).Create(ctx, valid.DeepCopyObject(), rest.ValidateAllObjectFunc, false)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer t.delete(ctx, created)
createdObjectMeta := t.getObjectMetaOrFail(created)
if createdObjectMeta.GetNamespace() != metav1.NamespaceNone {
t.Errorf("Expected empty namespace on created object, got '%v'", createdObjectMeta.GetNamespace())
}
}
func (t *Tester) testCreateIgnoresMismatchedNamespace(valid runtime.Object) {
objectMeta := t.getObjectMetaOrFail(valid)
// Ignore non-empty namespace in object meta
objectMeta.SetNamespace("not-default")
ctx := genericapirequest.WithNamespace(genericapirequest.NewContext(), "not-default2")
// Ideally, we'd get an error back here, but at least verify the namespace wasn't persisted
created, err := t.storage.(rest.Creater).Create(ctx, valid.DeepCopyObject(), rest.ValidateAllObjectFunc, false)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer t.delete(ctx, created)
createdObjectMeta := t.getObjectMetaOrFail(created)
if createdObjectMeta.GetNamespace() != metav1.NamespaceNone {
t.Errorf("Expected empty namespace on created object, got '%v'", createdObjectMeta.GetNamespace())
}
}
func (t *Tester) testCreateValidatesNames(valid runtime.Object) {
for _, invalidName := range path.NameMayNotBe {
objCopy := valid.DeepCopyObject()
objCopyMeta := t.getObjectMetaOrFail(objCopy)
objCopyMeta.SetName(invalidName)
ctx := t.TestContext()
_, err := t.storage.(rest.Creater).Create(ctx, objCopy, rest.ValidateAllObjectFunc, false)
if !errors.IsInvalid(err) {
t.Errorf("%s: Expected to get an invalid resource error, got '%v'", invalidName, err)
}
}
for _, invalidSuffix := range path.NameMayNotContain {
objCopy := valid.DeepCopyObject()
objCopyMeta := t.getObjectMetaOrFail(objCopy)
objCopyMeta.SetName(objCopyMeta.GetName() + invalidSuffix)
ctx := t.TestContext()
_, err := t.storage.(rest.Creater).Create(ctx, objCopy, rest.ValidateAllObjectFunc, false)
if !errors.IsInvalid(err) {
t.Errorf("%s: Expected to get an invalid resource error, got '%v'", invalidSuffix, err)
}
}
}
func (t *Tester) testCreateInvokesValidation(invalid ...runtime.Object) {
for i, obj := range invalid {
ctx := t.TestContext()
_, err := t.storage.(rest.Creater).Create(ctx, obj, rest.ValidateAllObjectFunc, false)
if !errors.IsInvalid(err) {
t.Errorf("%d: Expected to get an invalid resource error, got %v", i, err)
}
}
}
func (t *Tester) testCreateRejectsMismatchedNamespace(valid runtime.Object) {
objectMeta := t.getObjectMetaOrFail(valid)
objectMeta.SetNamespace("not-default")
_, err := t.storage.(rest.Creater).Create(t.TestContext(), valid, rest.ValidateAllObjectFunc, false)
if err == nil {
t.Errorf("Expected an error, but we didn't get one")
} else if !strings.Contains(err.Error(), "does not match the namespace sent on the request") {
t.Errorf("Expected 'does not match the namespace sent on the request' error, got '%v'", err.Error())
}
}
func (t *Tester) testCreateResetsUserData(valid runtime.Object) {
objectMeta := t.getObjectMetaOrFail(valid)
now := metav1.Now()
objectMeta.SetUID("bad-uid")
objectMeta.SetCreationTimestamp(now)
obj, err := t.storage.(rest.Creater).Create(t.TestContext(), valid, rest.ValidateAllObjectFunc, false)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if obj == nil {
t.Fatalf("Unexpected object from result: %#v", obj)
}
defer t.delete(t.TestContext(), obj)
if objectMeta.GetUID() == "bad-uid" || objectMeta.GetCreationTimestamp() == now {
t.Errorf("ObjectMeta did not reset basic fields: %#v", objectMeta)
}
}
func (t *Tester) testCreateIgnoreClusterName(valid runtime.Object) {
objectMeta := t.getObjectMetaOrFail(valid)
objectMeta.SetName(t.namer(3))
objectMeta.SetClusterName("clustername-to-ignore")
obj, err := t.storage.(rest.Creater).Create(t.TestContext(), valid.DeepCopyObject(), rest.ValidateAllObjectFunc, false)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
defer t.delete(t.TestContext(), obj)
createdObjectMeta := t.getObjectMetaOrFail(obj)
if len(createdObjectMeta.GetClusterName()) != 0 {
t.Errorf("Expected empty clusterName on created object, got '%v'", createdObjectMeta.GetClusterName())
}
}
// =============================================================================
// Update tests.
func (t *Tester) testUpdateEquals(obj runtime.Object, createFn CreateFunc, getFn GetFunc, updateFn UpdateFunc) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
t.setObjectMeta(foo, t.namer(2))
if err := createFn(ctx, foo); err != nil {
t.Errorf("unexpected error: %v", err)
}
toUpdate, err := getFn(ctx, foo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
toUpdate = updateFn(toUpdate)
toUpdateMeta := t.getObjectMetaOrFail(toUpdate)
updated, created, err := t.storage.(rest.Updater).Update(ctx, toUpdateMeta.GetName(), rest.DefaultUpdatedObjectInfo(toUpdate), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if created {
t.Errorf("unexpected creation")
}
got, err := getFn(ctx, foo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
// Set resource version which might be unset in created object.
updatedMeta := t.getObjectMetaOrFail(updated)
gotMeta := t.getObjectMetaOrFail(got)
updatedMeta.SetResourceVersion(gotMeta.GetResourceVersion())
if e, a := updated, got; !apiequality.Semantic.DeepEqual(e, a) {
t.Errorf("unexpected obj: %#v, expected %#v", e, a)
}
}
func (t *Tester) testUpdateFailsOnVersionTooOld(obj runtime.Object, createFn CreateFunc, getFn GetFunc) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
t.setObjectMeta(foo, t.namer(3))
if err := createFn(ctx, foo); err != nil {
t.Errorf("unexpected error: %v", err)
}
storedFoo, err := getFn(ctx, foo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
older := storedFoo.DeepCopyObject()
olderMeta := t.getObjectMetaOrFail(older)
olderMeta.SetResourceVersion("1")
_, _, err = t.storage.(rest.Updater).Update(t.TestContext(), olderMeta.GetName(), rest.DefaultUpdatedObjectInfo(older), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc)
if err == nil {
t.Errorf("Expected an error, but we didn't get one")
} else if !errors.IsConflict(err) {
t.Errorf("Expected Conflict error, got '%v'", err)
}
}
func (t *Tester) testUpdateInvokesValidation(obj runtime.Object, createFn CreateFunc, invalidUpdateFn ...UpdateFunc) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
t.setObjectMeta(foo, t.namer(4))
if err := createFn(ctx, foo); err != nil {
t.Errorf("unexpected error: %v", err)
}
for _, update := range invalidUpdateFn {
toUpdate := update(foo.DeepCopyObject())
toUpdateMeta := t.getObjectMetaOrFail(toUpdate)
got, created, err := t.storage.(rest.Updater).Update(t.TestContext(), toUpdateMeta.GetName(), rest.DefaultUpdatedObjectInfo(toUpdate), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc)
if got != nil || created {
t.Errorf("expected nil object and no creation for object: %v", toUpdate)
}
if !errors.IsInvalid(err) && !errors.IsBadRequest(err) {
t.Errorf("expected invalid or bad request error, got %v", err)
}
}
}
func (t *Tester) testUpdateWithWrongUID(obj runtime.Object, createFn CreateFunc, getFn GetFunc) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
t.setObjectMeta(foo, t.namer(5))
objectMeta := t.getObjectMetaOrFail(foo)
objectMeta.SetUID(types.UID("UID0000"))
if err := createFn(ctx, foo); err != nil {
t.Errorf("unexpected error: %v", err)
}
objectMeta.SetUID(types.UID("UID1111"))
obj, created, err := t.storage.(rest.Updater).Update(ctx, objectMeta.GetName(), rest.DefaultUpdatedObjectInfo(foo), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc)
if created || obj != nil {
t.Errorf("expected nil object and no creation for object: %v", foo)
}
if err == nil || !errors.IsConflict(err) {
t.Errorf("unexpected error: %v", err)
}
}
func (t *Tester) testUpdateRetrievesOldObject(obj runtime.Object, createFn CreateFunc, getFn GetFunc) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
t.setObjectMeta(foo, t.namer(6))
objectMeta := t.getObjectMetaOrFail(foo)
objectMeta.SetAnnotations(map[string]string{"A": "1"})
if err := createFn(ctx, foo); err != nil {
t.Errorf("unexpected error: %v", err)
return
}
storedFoo, err := getFn(ctx, foo)
if err != nil {
t.Errorf("unexpected error: %v", err)
return
}
storedFooWithUpdates := storedFoo.DeepCopyObject()
objectMeta = t.getObjectMetaOrFail(storedFooWithUpdates)
objectMeta.SetAnnotations(map[string]string{"A": "2"})
// Make sure a custom transform is called, and sees the expected updatedObject and oldObject
// This tests the mechanism used to pass the old and new object to admission
calledUpdatedObject := 0
noopTransform := func(_ genericapirequest.Context, updatedObject runtime.Object, oldObject runtime.Object) (runtime.Object, error) {
if !reflect.DeepEqual(storedFoo, oldObject) {
t.Errorf("Expected\n\t%#v\ngot\n\t%#v", storedFoo, oldObject)
}
if !reflect.DeepEqual(storedFooWithUpdates, updatedObject) {
t.Errorf("Expected\n\t%#v\ngot\n\t%#v", storedFooWithUpdates, updatedObject)
}
calledUpdatedObject++
return updatedObject, nil
}
updatedObj, created, err := t.storage.(rest.Updater).Update(ctx, objectMeta.GetName(), rest.DefaultUpdatedObjectInfo(storedFooWithUpdates, noopTransform), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc)
if err != nil {
t.Errorf("unexpected error: %v", err)
return
}
if created {
t.Errorf("expected no creation for object")
return
}
if updatedObj == nil {
t.Errorf("expected non-nil object from update")
return
}
if calledUpdatedObject != 1 {
t.Errorf("expected UpdatedObject() to be called 1 time, was called %d", calledUpdatedObject)
return
}
}
func (t *Tester) testUpdatePropagatesUpdatedObjectError(obj runtime.Object, createFn CreateFunc, getFn GetFunc) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
name := t.namer(7)
t.setObjectMeta(foo, name)
if err := createFn(ctx, foo); err != nil {
t.Errorf("unexpected error: %v", err)
return
}
// Make sure our transform is called, and sees the expected updatedObject and oldObject
propagateErr := fmt.Errorf("custom updated object error for %v", foo)
noopTransform := func(_ genericapirequest.Context, updatedObject runtime.Object, oldObject runtime.Object) (runtime.Object, error) {
return nil, propagateErr
}
_, _, err := t.storage.(rest.Updater).Update(ctx, name, rest.DefaultUpdatedObjectInfo(foo, noopTransform), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc)
if err != propagateErr {
t.Errorf("expected propagated error, got %#v", err)
}
}
func (t *Tester) testUpdateIgnoreGenerationUpdates(obj runtime.Object, createFn CreateFunc, getFn GetFunc) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
name := t.namer(8)
t.setObjectMeta(foo, name)
if err := createFn(ctx, foo); err != nil {
t.Errorf("unexpected error: %v", err)
}
storedFoo, err := getFn(ctx, foo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
older := storedFoo.DeepCopyObject()
olderMeta := t.getObjectMetaOrFail(older)
olderMeta.SetGeneration(2)
_, _, err = t.storage.(rest.Updater).Update(t.TestContext(), olderMeta.GetName(), rest.DefaultUpdatedObjectInfo(older), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc)
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
updatedFoo, err := getFn(ctx, older)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if exp, got := int64(1), t.getObjectMetaOrFail(updatedFoo).GetGeneration(); exp != got {
t.Errorf("Unexpected generation update: expected %d, got %d", exp, got)
}
}
func (t *Tester) testUpdateOnNotFound(obj runtime.Object) {
t.setObjectMeta(obj, t.namer(0))
_, created, err := t.storage.(rest.Updater).Update(t.TestContext(), t.namer(0), rest.DefaultUpdatedObjectInfo(obj), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc)
if t.createOnUpdate {
if err != nil {
t.Errorf("creation allowed on updated, but got an error: %v", err)
}
if !created {
t.Errorf("creation allowed on update, but object not created")
}
} else {
if err == nil {
t.Errorf("Expected an error, but we didn't get one")
} else if !errors.IsNotFound(err) {
t.Errorf("Expected NotFound error, got '%v'", err)
}
}
}
func (t *Tester) testUpdateRejectsMismatchedNamespace(obj runtime.Object, createFn CreateFunc, getFn GetFunc) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
t.setObjectMeta(foo, t.namer(1))
if err := createFn(ctx, foo); err != nil {
t.Errorf("unexpected error: %v", err)
}
storedFoo, err := getFn(ctx, foo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
objectMeta := t.getObjectMetaOrFail(storedFoo)
objectMeta.SetName(t.namer(1))
objectMeta.SetNamespace("not-default")
obj, updated, err := t.storage.(rest.Updater).Update(t.TestContext(), t.namer(1), rest.DefaultUpdatedObjectInfo(storedFoo), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc)
if obj != nil || updated {
t.Errorf("expected nil object and not updated")
}
if err == nil {
t.Errorf("expected an error, but didn't get one")
} else if !strings.Contains(err.Error(), "does not match the namespace sent on the request") {
t.Errorf("expected 'does not match the namespace sent on the request' error, got '%v'", err.Error())
}
}
func (t *Tester) testUpdateIgnoreClusterName(obj runtime.Object, createFn CreateFunc, getFn GetFunc) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
name := t.namer(9)
t.setObjectMeta(foo, name)
if err := createFn(ctx, foo); err != nil {
t.Errorf("unexpected error: %v", err)
}
storedFoo, err := getFn(ctx, foo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
older := storedFoo.DeepCopyObject()
olderMeta := t.getObjectMetaOrFail(older)
olderMeta.SetClusterName("clustername-to-ignore")
_, _, err = t.storage.(rest.Updater).Update(t.TestContext(), olderMeta.GetName(), rest.DefaultUpdatedObjectInfo(older), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc)
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
updatedFoo, err := getFn(ctx, older)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if clusterName := t.getObjectMetaOrFail(updatedFoo).GetClusterName(); len(clusterName) != 0 {
t.Errorf("Unexpected clusterName update: expected empty, got %v", clusterName)
}
}
// =============================================================================
// Deletion tests.
func (t *Tester) testDeleteNoGraceful(obj runtime.Object, createFn CreateFunc, getFn GetFunc, isNotFoundFn IsErrorFunc) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
t.setObjectMeta(foo, t.namer(1))
if err := createFn(ctx, foo); err != nil {
t.Errorf("unexpected error: %v", err)
}
objectMeta := t.getObjectMetaOrFail(foo)
obj, wasDeleted, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.GetName(), metav1.NewDeleteOptions(10))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if !wasDeleted {
t.Errorf("unexpected, object %s should have been deleted immediately", objectMeta.GetName())
}
if !t.returnDeletedObject {
if status, ok := obj.(*metav1.Status); !ok {
t.Errorf("expected a metav1.Status from delete, got %#v", obj)
} else if status.Status != metav1.StatusSuccess {
t.Errorf("expected success, got: %v", status.Status)
}
}
_, err = getFn(ctx, foo)
if err == nil || !isNotFoundFn(err) {
t.Errorf("unexpected error: %v", err)
}
}
func (t *Tester) testDeleteNonExist(obj runtime.Object) {
objectMeta := t.getObjectMetaOrFail(obj)
_, _, err := t.storage.(rest.GracefulDeleter).Delete(t.TestContext(), objectMeta.GetName(), nil)
if err == nil || !errors.IsNotFound(err) {
t.Errorf("unexpected error: %v", err)
}
}
// This tests the fast-fail path. Verification of the precondition immediately
// before the object is actually deleted is covered by the tests of
// pkg/storage/etcd.
func (t *Tester) testDeleteWithUID(obj runtime.Object, createFn CreateFunc, getFn GetFunc, isNotFoundFn IsErrorFunc) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
t.setObjectMeta(foo, t.namer(1))
objectMeta := t.getObjectMetaOrFail(foo)
objectMeta.SetUID(types.UID("UID0000"))
if err := createFn(ctx, foo); err != nil {
t.Errorf("unexpected error: %v", err)
}
obj, _, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.GetName(), metav1.NewPreconditionDeleteOptions("UID1111"))
if err == nil || !errors.IsConflict(err) {
t.Errorf("unexpected error: %v", err)
}
obj, _, err = t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.GetName(), metav1.NewPreconditionDeleteOptions("UID0000"))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if !t.returnDeletedObject {
if status, ok := obj.(*metav1.Status); !ok {
t.Errorf("expected a metav1.Status from delete, got %#v", obj)
} else if status.Status != metav1.StatusSuccess {
t.Errorf("expected success, got: %v", status.Status)
}
}
_, err = getFn(ctx, foo)
if err == nil || !isNotFoundFn(err) {
t.Errorf("unexpected error: %v", err)
}
}
// =============================================================================
// Graceful Deletion tests.
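// Each helper below creates an object, issues a graceful delete, and asserts
// that the stored object's deletionTimestamp, deletionGracePeriodSeconds and
// generation reflect the scenario being exercised.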
func (t *Tester) testDeleteGracefulHasDefault(obj runtime.Object, createFn CreateFunc, getFn GetFunc, expectedGrace int64) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
t.setObjectMeta(foo, t.namer(1))
if err := createFn(ctx, foo); err != nil {
t.Errorf("unexpected error: %v", err)
}
objectMeta := t.getObjectMetaOrFail(foo)
generation := objectMeta.GetGeneration()
_, wasDeleted, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.GetName(), &metav1.DeleteOptions{})
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if wasDeleted {
t.Errorf("unexpected, object %s should not have been deleted immediately", objectMeta.GetName())
}
if _, err := getFn(ctx, foo); err != nil {
t.Fatalf("did not gracefully delete resource: %v", err)
}
object, err := t.storage.(rest.Getter).Get(ctx, objectMeta.GetName(), &metav1.GetOptions{})
if err != nil {
t.Fatalf("unexpected error, object should exist: %v", err)
}
objectMeta = t.getObjectMetaOrFail(object)
if objectMeta.GetDeletionTimestamp() == nil || objectMeta.GetDeletionGracePeriodSeconds() == nil || *objectMeta.GetDeletionGracePeriodSeconds() != expectedGrace {
t.Errorf("unexpected deleted meta: %#v", objectMeta)
}
if generation >= objectMeta.GetGeneration() {
t.Error("Generation wasn't bumped when deletion timestamp was set")
}
}
func (t *Tester) testDeleteGracefulWithValue(obj runtime.Object, createFn CreateFunc, getFn GetFunc, expectedGrace int64) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
t.setObjectMeta(foo, t.namer(2))
if err := createFn(ctx, foo); err != nil {
t.Errorf("unexpected error: %v", err)
}
objectMeta := t.getObjectMetaOrFail(foo)
generation := objectMeta.GetGeneration()
_, wasDeleted, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.GetName(), metav1.NewDeleteOptions(expectedGrace+2))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if wasDeleted {
t.Errorf("unexpected, object %s should not have been deleted immediately", objectMeta.GetName())
}
if _, err := getFn(ctx, foo); err != nil {
t.Fatalf("did not gracefully delete resource: %v", err)
}
object, err := t.storage.(rest.Getter).Get(ctx, objectMeta.GetName(), &metav1.GetOptions{})
if err != nil {
t.Errorf("unexpected error, object should exist: %v", err)
}
objectMeta = t.getObjectMetaOrFail(object)
if objectMeta.GetDeletionTimestamp() == nil || objectMeta.GetDeletionGracePeriodSeconds() == nil || *objectMeta.GetDeletionGracePeriodSeconds() != expectedGrace+2 {
t.Errorf("unexpected deleted meta: %#v", objectMeta)
}
if generation >= objectMeta.GetGeneration() {
t.Error("Generation wasn't bumped when deletion timestamp was set")
}
}
func (t *Tester) testDeleteGracefulExtend(obj runtime.Object, createFn CreateFunc, getFn GetFunc, expectedGrace int64) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
t.setObjectMeta(foo, t.namer(3))
if err := createFn(ctx, foo); err != nil {
t.Errorf("unexpected error: %v", err)
}
objectMeta := t.getObjectMetaOrFail(foo)
generation := objectMeta.GetGeneration()
_, wasDeleted, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.GetName(), metav1.NewDeleteOptions(expectedGrace))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if wasDeleted {
t.Errorf("unexpected, object %s should not have been deleted immediately", objectMeta.GetName())
}
if _, err := getFn(ctx, foo); err != nil {
t.Fatalf("did not gracefully delete resource: %v", err)
}
// second delete duration is ignored
_, wasDeleted, err = t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.GetName(), metav1.NewDeleteOptions(expectedGrace+2))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if wasDeleted {
t.Errorf("unexpected, object %s should not have been deleted immediately", objectMeta.GetName())
}
object, err := t.storage.(rest.Getter).Get(ctx, objectMeta.GetName(), &metav1.GetOptions{})
if err != nil {
t.Errorf("unexpected error, object should exist: %v", err)
}
objectMeta = t.getObjectMetaOrFail(object)
if objectMeta.GetDeletionTimestamp() == nil || objectMeta.GetDeletionGracePeriodSeconds() == nil || *objectMeta.GetDeletionGracePeriodSeconds() != expectedGrace {
t.Errorf("unexpected deleted meta: %#v", objectMeta)
}
if generation >= objectMeta.GetGeneration() {
t.Error("Generation wasn't bumped when deletion timestamp was set")
}
}
func (t *Tester) testDeleteGracefulImmediate(obj runtime.Object, createFn CreateFunc, getFn GetFunc, expectedGrace int64) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
t.setObjectMeta(foo, "foo4")
if err := createFn(ctx, foo); err != nil {
t.Errorf("unexpected error: %v", err)
}
objectMeta := t.getObjectMetaOrFail(foo)
generation := objectMeta.GetGeneration()
_, wasDeleted, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.GetName(), metav1.NewDeleteOptions(expectedGrace))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if wasDeleted {
t.Errorf("unexpected, object %s should not have been deleted immediately", objectMeta.GetName())
}
if _, err := getFn(ctx, foo); err != nil {
t.Fatalf("did not gracefully delete resource: %v", err)
}
// second delete is immediate, resource is deleted
out, wasDeleted, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.GetName(), metav1.NewDeleteOptions(0))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if !wasDeleted {
t.Errorf("unexpected, object %s should have been deleted immediately", objectMeta.GetName())
}
_, err = t.storage.(rest.Getter).Get(ctx, objectMeta.GetName(), &metav1.GetOptions{})
if !errors.IsNotFound(err) {
t.Errorf("unexpected error, object should be deleted immediately: %v", err)
}
objectMeta = t.getObjectMetaOrFail(out)
// The second, immediate delete should leave the returned object with a deletion timestamp and a grace period equal to 0.
if objectMeta.GetDeletionTimestamp() == nil || objectMeta.GetDeletionGracePeriodSeconds() == nil || *objectMeta.GetDeletionGracePeriodSeconds() != 0 {
t.Errorf("unexpected deleted meta: %#v", objectMeta)
}
if generation >= objectMeta.GetGeneration() {
t.Error("Generation wasn't bumped when deletion timestamp was set")
}
}
func (t *Tester) testDeleteGracefulUsesZeroOnNil(obj runtime.Object, createFn CreateFunc, expectedGrace int64) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
t.setObjectMeta(foo, t.namer(5))
if err := createFn(ctx, foo); err != nil {
t.Errorf("unexpected error: %v", err)
}
objectMeta := t.getObjectMetaOrFail(foo)
_, wasDeleted, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.GetName(), nil)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if !wasDeleted {
t.Errorf("unexpected, object %s should have been deleted immediately", objectMeta.GetName())
}
if _, err := t.storage.(rest.Getter).Get(ctx, objectMeta.GetName(), &metav1.GetOptions{}); !errors.IsNotFound(err) {
t.Errorf("unexpected error, object should not exist: %v", err)
}
}
// Regression test for bug discussed in #27539
func (t *Tester) testDeleteGracefulShorten(obj runtime.Object, createFn CreateFunc, getFn GetFunc, expectedGrace int64) {
ctx := t.TestContext()
foo := obj.DeepCopyObject()
t.setObjectMeta(foo, t.namer(6))
if err := createFn(ctx, foo); err != nil {
t.Errorf("unexpected error: %v", err)
}
bigGrace := int64(time.Hour)
if expectedGrace > bigGrace {
bigGrace = 2 * expectedGrace
}
objectMeta := t.getObjectMetaOrFail(foo)
_, wasDeleted, err := t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.GetName(), metav1.NewDeleteOptions(bigGrace))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if wasDeleted {
t.Errorf("unexpected, object %s should not have been deleted immediately", objectMeta.GetName())
}
object, err := getFn(ctx, foo)
if err != nil {
t.Fatalf("did not gracefully delete resource: %v", err)
}
objectMeta = t.getObjectMetaOrFail(object)
deletionTimestamp := *objectMeta.GetDeletionTimestamp()
// second delete duration is ignored
_, wasDeleted, err = t.storage.(rest.GracefulDeleter).Delete(ctx, objectMeta.GetName(), metav1.NewDeleteOptions(expectedGrace))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if wasDeleted {
t.Errorf("unexpected, object %s should not have been deleted immediately", objectMeta.GetName())
}
object, err = t.storage.(rest.Getter).Get(ctx, objectMeta.GetName(), &metav1.GetOptions{})
if err != nil {
t.Errorf("unexpected error, object should exist: %v", err)
}
objectMeta = t.getObjectMetaOrFail(object)
if objectMeta.GetDeletionTimestamp() == nil || objectMeta.GetDeletionGracePeriodSeconds() == nil ||
*objectMeta.GetDeletionGracePeriodSeconds() != expectedGrace || !objectMeta.GetDeletionTimestamp().Before(&deletionTimestamp) {
t.Errorf("unexpected deleted meta: %#v", objectMeta)
}
}
// =============================================================================
// Get tests.
// testGetDifferentNamespace ensures same-name objects in different namespaces do not clash
func (t *Tester) testGetDifferentNamespace(obj runtime.Object) {
if t.clusterScope {
t.Fatalf("the test does not work in in cluster-scope")
}
objMeta := t.getObjectMetaOrFail(obj)
objMeta.SetName(t.namer(5))
ctx1 := genericapirequest.WithNamespace(genericapirequest.NewContext(), "bar3")
objMeta.SetNamespace(genericapirequest.NamespaceValue(ctx1))
_, err := t.storage.(rest.Creater).Create(ctx1, obj, rest.ValidateAllObjectFunc, false)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
ctx2 := genericapirequest.WithNamespace(genericapirequest.NewContext(), "bar4")
objMeta.SetNamespace(genericapirequest.NamespaceValue(ctx2))
_, err = t.storage.(rest.Creater).Create(ctx2, obj, rest.ValidateAllObjectFunc, false)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
got1, err := t.storage.(rest.Getter).Get(ctx1, objMeta.GetName(), &metav1.GetOptions{})
if err != nil {
t.Errorf("unexpected error: %v", err)
}
got1Meta := t.getObjectMetaOrFail(got1)
if got1Meta.GetName() != objMeta.GetName() {
t.Errorf("unexpected name of object: %#v, expected: %s", got1, objMeta.GetName())
}
if got1Meta.GetNamespace() != genericapirequest.NamespaceValue(ctx1) {
t.Errorf("unexpected namespace of object: %#v, expected: %s", got1, genericapirequest.NamespaceValue(ctx1))
}
got2, err := t.storage.(rest.Getter).Get(ctx2, objMeta.GetName(), &metav1.GetOptions{})
if err != nil {
t.Errorf("unexpected error: %v", err)
}
got2Meta := t.getObjectMetaOrFail(got2)
if got2Meta.GetName() != objMeta.GetName() {
t.Errorf("unexpected name of object: %#v, expected: %s", got2, objMeta.GetName())
}
if got2Meta.GetNamespace() != genericapirequest.NamespaceValue(ctx2) {
t.Errorf("unexpected namespace of object: %#v, expected: %s", got2, genericapirequest.NamespaceValue(ctx2))
}
}
func (t *Tester) testGetFound(obj runtime.Object) {
ctx := t.TestContext()
t.setObjectMeta(obj, t.namer(1))
existing, err := t.storage.(rest.Creater).Create(ctx, obj, rest.ValidateAllObjectFunc, false)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
existingMeta := t.getObjectMetaOrFail(existing)
got, err := t.storage.(rest.Getter).Get(ctx, t.namer(1), &metav1.GetOptions{})
if err != nil {
t.Errorf("unexpected error: %v", err)
}
gotMeta := t.getObjectMetaOrFail(got)
gotMeta.SetResourceVersion(existingMeta.GetResourceVersion())
if e, a := existing, got; !apiequality.Semantic.DeepEqual(e, a) {
t.Errorf("unexpected obj: %#v, expected %#v", e, a)
}
}
func (t *Tester) testGetMimatchedNamespace(obj runtime.Object) {
ctx1 := genericapirequest.WithNamespace(genericapirequest.NewContext(), "bar1")
ctx2 := genericapirequest.WithNamespace(genericapirequest.NewContext(), "bar2")
objMeta := t.getObjectMetaOrFail(obj)
objMeta.SetName(t.namer(4))
objMeta.SetNamespace(genericapirequest.NamespaceValue(ctx1))
_, err := t.storage.(rest.Creater).Create(ctx1, obj, rest.ValidateAllObjectFunc, false)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
_, err = t.storage.(rest.Getter).Get(ctx2, t.namer(4), &metav1.GetOptions{})
if t.clusterScope {
if err != nil {
t.Errorf("unexpected error: %v", err)
}
} else {
if !errors.IsNotFound(err) {
t.Errorf("unexpected error returned: %#v", err)
}
}
}
func (t *Tester) testGetNotFound(obj runtime.Object) {
ctx := t.TestContext()
t.setObjectMeta(obj, t.namer(2))
_, err := t.storage.(rest.Creater).Create(ctx, obj, rest.ValidateAllObjectFunc, false)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
_, err = t.storage.(rest.Getter).Get(ctx, t.namer(3), &metav1.GetOptions{})
if !errors.IsNotFound(err) {
t.Errorf("unexpected error returned: %#v", err)
}
}
// =============================================================================
// List tests.
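// listToItems uses reflection to extract the Items slice from an arbitrary
// list object as []runtime.Object; it assumes the concrete list type exposes
// a slice field named Items, as the list types under test do.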
func listToItems(listObj runtime.Object) ([]runtime.Object, error) {
v, err := conversion.EnforcePtr(listObj)
if err != nil {
return nil, fmt.Errorf("unexpected error: %v", err)
}
items := v.FieldByName("Items")
if !items.IsValid() {
return nil, fmt.Errorf("unexpected Items field in %v", listObj)
}
if items.Type().Kind() != reflect.Slice {
return nil, fmt.Errorf("unexpected Items field type: %v", items.Type().Kind())
}
result := make([]runtime.Object, items.Len())
for i := 0; i < items.Len(); i++ {
result[i] = items.Index(i).Addr().Interface().(runtime.Object)
}
return result, nil
}
func (t *Tester) testListFound(obj runtime.Object, assignFn AssignFunc) {
ctx := t.TestContext()
foo1 := obj.DeepCopyObject()
t.setObjectMeta(foo1, t.namer(1))
foo2 := obj.DeepCopyObject()
t.setObjectMeta(foo2, t.namer(2))
existing := assignFn([]runtime.Object{foo1, foo2})
listObj, err := t.storage.(rest.Lister).List(ctx, nil)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
items, err := listToItems(listObj)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(items) != len(existing) {
t.Errorf("unexpected number of items: %v", len(items))
}
if !apiequality.Semantic.DeepEqual(existing, items) {
t.Errorf("expected: %#v, got: %#v", existing, items)
}
}
func (t *Tester) testListMatchLabels(obj runtime.Object, assignFn AssignFunc) {
ctx := t.TestContext()
testLabels := map[string]string{"key": "value"}
foo3 := obj.DeepCopyObject()
t.setObjectMeta(foo3, "foo3")
foo4 := obj.DeepCopyObject()
foo4Meta := t.getObjectMetaOrFail(foo4)
foo4Meta.SetName("foo4")
foo4Meta.SetNamespace(genericapirequest.NamespaceValue(ctx))
foo4Meta.SetLabels(testLabels)
objs := []runtime.Object{foo3, foo4}
assignFn(objs)
filtered := []runtime.Object{objs[1]}
selector := labels.SelectorFromSet(labels.Set(testLabels))
options := &metainternalversion.ListOptions{LabelSelector: selector}
listObj, err := t.storage.(rest.Lister).List(ctx, options)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
items, err := listToItems(listObj)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(items) != len(filtered) {
t.Errorf("unexpected number of items: %v", len(items))
}
if !apiequality.Semantic.DeepEqual(filtered, items) {
t.Errorf("expected: %#v, got: %#v", filtered, items)
}
}
func (t *Tester) testListNotFound(assignFn AssignFunc) {
ctx := t.TestContext()
_ = assignFn([]runtime.Object{})
listObj, err := t.storage.(rest.Lister).List(ctx, nil)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
items, err := listToItems(listObj)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(items) != 0 {
t.Errorf("unexpected items: %#v", items)
}
}
// testListTableConversion verifies a set of known bounds and expected limitations for the values
// returned from a TableList. These conditions may be changed if necessary with adequate review.
func (t *Tester) testListTableConversion(obj runtime.Object, assignFn AssignFunc) {
ctx := t.TestContext()
testLabels := map[string]string{"key": "value"}
foo3 := obj.DeepCopyObject()
t.setObjectMeta(foo3, "foo3")
foo4 := obj.DeepCopyObject()
foo4Meta := t.getObjectMetaOrFail(foo4)
foo4Meta.SetName("foo4")
foo4Meta.SetNamespace(genericapirequest.NamespaceValue(ctx))
foo4Meta.SetLabels(testLabels)
objs := []runtime.Object{foo3, foo4}
assignFn(objs)
options := &metainternalversion.ListOptions{}
listObj, err := t.storage.(rest.Lister).List(ctx, options)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
items, err := listToItems(listObj)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(items) != len(objs) {
t.Errorf("unexpected number of items: %v", len(items))
}
if !apiequality.Semantic.DeepEqual(objs, items) {
t.Errorf("expected: %#v, got: %#v", objs, items)
}
m, err := meta.ListAccessor(listObj)
if err != nil {
t.Fatalf("list should support ListMeta %T: %v", listObj, err)
}
m.SetContinue("continuetoken")
m.SetResourceVersion("11")
m.SetSelfLink("/list/link")
table, err := t.storage.(rest.TableConvertor).ConvertToTable(ctx, listObj, nil)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if table.ResourceVersion != "11" || table.SelfLink != "/list/link" || table.Continue != "continuetoken" {
t.Errorf("printer lost list meta: %#v", table.ListMeta)
}
if len(table.Rows) != len(items) {
t.Errorf("unexpected number of rows: %v", len(table.Rows))
}
columns := table.ColumnDefinitions
if len(columns) == 0 {
t.Errorf("unexpected number of columns: %v", len(columns))
}
if columns[0].Name != "Name" || columns[0].Type != "string" || columns[0].Format != "name" {
t.Errorf("expect column 0 to be the name column: %#v", columns[0])
}
for j, column := range columns {
if len(column.Name) == 0 {
t.Errorf("column %d has no name", j)
}
switch column.Type {
case "string", "date", "integer":
default:
t.Errorf("column %d has unexpected type: %q", j, column.Type)
}
switch {
case column.Format == "":
case column.Format == "name" && column.Type == "string":
default:
t.Errorf("column %d has unexpected format: %q with type %q", j, column.Format, column.Type)
}
if column.Priority < 0 || column.Priority > 2 {
t.Errorf("column %d has unexpected priority: %q", j, column.Priority)
}
if len(column.Description) == 0 {
t.Errorf("column %d has no description", j)
}
if column.Name == "Created At" && column.Type != "date" && column.Format != "" {
t.Errorf("column %d looks like a created at column, but has a different type and format: %#v", j, column)
}
}
for i, row := range table.Rows {
if len(row.Cells) != len(table.ColumnDefinitions) {
t.Errorf("row %d did not have the correct number of cells: %d in %v", i, len(table.ColumnDefinitions), row.Cells)
}
for j, cell := range row.Cells {
// do not add to this test without discussion - may break clients
switch cell.(type) {
case float64, int64, int32, int, string, bool:
case []interface{}:
default:
t.Errorf("row %d, cell %d has an unrecognized type, only JSON serialization safe types are allowed: %T ", i, j, cell)
}
}
}
}
// =============================================================================
// Watching tests.
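// The helpers below open a watch with a field or label selector, emit an
// event via emitFn, and assert that matching selectors observe the event
// while non-matching selectors see nothing before the timeout.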
func (t *Tester) testWatchFields(obj runtime.Object, emitFn EmitFunc, fieldsPass, fieldsFail []fields.Set, actions []string) {
ctx := t.TestContext()
for _, field := range fieldsPass {
for _, action := range actions {
options := &metainternalversion.ListOptions{FieldSelector: field.AsSelector(), ResourceVersion: "1"}
watcher, err := t.storage.(rest.Watcher).Watch(ctx, options)
if err != nil {
t.Errorf("unexpected error: %v, %v", err, action)
}
if err := emitFn(obj, action); err != nil {
t.Errorf("unexpected error: %v", err)
}
select {
case _, ok := <-watcher.ResultChan():
if !ok {
t.Errorf("watch channel should be open")
}
case <-time.After(wait.ForeverTestTimeout):
t.Errorf("unexpected timeout from result channel")
}
watcher.Stop()
}
}
for _, field := range fieldsFail {
for _, action := range actions {
options := &metainternalversion.ListOptions{FieldSelector: field.AsSelector(), ResourceVersion: "1"}
watcher, err := t.storage.(rest.Watcher).Watch(ctx, options)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if err := emitFn(obj, action); err != nil {
t.Errorf("unexpected error: %v", err)
}
select {
case <-watcher.ResultChan():
t.Errorf("unexpected result from result channel")
case <-time.After(time.Millisecond * 500):
// expected case
}
watcher.Stop()
}
}
}
func (t *Tester) testWatchLabels(obj runtime.Object, emitFn EmitFunc, labelsPass, labelsFail []labels.Set, actions []string) {
ctx := t.TestContext()
for _, label := range labelsPass {
for _, action := range actions {
options := &metainternalversion.ListOptions{LabelSelector: label.AsSelector(), ResourceVersion: "1"}
watcher, err := t.storage.(rest.Watcher).Watch(ctx, options)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if err := emitFn(obj, action); err != nil {
t.Errorf("unexpected error: %v", err)
}
select {
case _, ok := <-watcher.ResultChan():
if !ok {
t.Errorf("watch channel should be open")
}
case <-time.After(wait.ForeverTestTimeout):
t.Errorf("unexpected timeout from result channel")
}
watcher.Stop()
}
}
for _, label := range labelsFail {
for _, action := range actions {
options := &metainternalversion.ListOptions{LabelSelector: label.AsSelector(), ResourceVersion: "1"}
watcher, err := t.storage.(rest.Watcher).Watch(ctx, options)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if err := emitFn(obj, action); err != nil {
t.Errorf("unexpected error: %v", err)
}
select {
case <-watcher.ResultChan():
t.Errorf("unexpected result from result channel")
case <-time.After(time.Millisecond * 500):
// expected case
}
watcher.Stop()
}
}
}
run_text_classification.py
#!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Fine-tuning the library models for sequence classification."""
# You can also adapt this script on your own text classification task. Pointers for this are left as comments.
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from math import ceil
from pathlib import Path
from typing import Optional
import numpy as np
from datasets import load_dataset
from transformers import (
AutoConfig,
AutoTokenizer,
HfArgumentParser,
PretrainedConfig,
TFAutoModelForSequenceClassification,
TrainingArguments,
set_seed,
)
from transformers.file_utils import CONFIG_NAME, TF2_WEIGHTS_NAME
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1" # Reduce the amount of console output from TF
import tensorflow as tf # noqa: E402
logger = logging.getLogger(__name__)
# region Helper classes
class DataSequence(tf.keras.utils.Sequence):
# We use a Sequence object to load the data. Although it's completely possible to load your data as Numpy/TF arrays
# and pass those straight to the Model, this constrains you in a couple of ways. Most notably, it requires all
# the data to be padded to the length of the longest input example, and it also requires the whole dataset to be
# loaded into memory. If these aren't major problems for you, you can skip the sequence object in your own code!
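# As a purely illustrative (hypothetical) alternative, padding everything up front and skipping the
# Sequence would look roughly like:
#   batch = tokenizer(texts, padding="max_length", truncation=True, return_tensors="np")
#   model.fit(dict(batch), labels, batch_size=32)
# at the cost of padding every example to the global maximum length and holding it all in memory.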
def __init__(self, dataset, non_label_column_names, batch_size, labels, shuffle=True):
super().__init__()
# Retain all of the columns not present in the original data - these are the ones added by the tokenizer
self.data = {
key: dataset[key]
for key in dataset.features.keys()
if key not in non_label_column_names and key != "label"
}
data_lengths = {len(array) for array in self.data.values()}
assert len(data_lengths) == 1, "Dataset arrays differ in length!"
self.data_length = data_lengths.pop()
self.num_batches = ceil(self.data_length / batch_size)
if labels:
self.labels = np.array(dataset["label"])
assert len(self.labels) == self.data_length, "Labels not the same length as input arrays!"
else:
self.labels = None
self.batch_size = batch_size
self.shuffle = shuffle
if self.shuffle:
# Shuffle the data order
self.permutation = np.random.permutation(self.data_length)
else:
self.permutation = None
def on_epoch_end(self):
# If we're shuffling, reshuffle the data order after each epoch
if self.shuffle:
self.permutation = np.random.permutation(self.data_length)
def __getitem__(self, item):
# Note that this yields a batch, not a single sample
batch_start = item * self.batch_size
batch_end = (item + 1) * self.batch_size
if self.shuffle:
data_indices = self.permutation[batch_start:batch_end]
else:
data_indices = np.arange(batch_start, batch_end)
# We want to pad the data as little as possible, so we only pad each batch
# to the maximum length within that batch. We do that by stacking the variable-
# length inputs into a ragged tensor and then densifying it.
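# For example, tf.ragged.constant([[1, 2, 3], [4]]).to_tensor() yields
# [[1, 2, 3], [4, 0, 0]]: sequences are zero-padded only up to the longest
# example in the current batch.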
batch_input = {
key: tf.ragged.constant([data[i] for i in data_indices]).to_tensor() for key, data in self.data.items()
}
if self.labels is None:
return batch_input
else:
batch_labels = self.labels[data_indices]
return batch_input, batch_labels
def __len__(self):
return self.num_batches
class SavePretrainedCallback(tf.keras.callbacks.Callback):
# Hugging Face models have a save_pretrained() method that saves both the weights and the necessary
# metadata to allow them to be loaded as a pretrained model in future. This is a simple Keras callback
# that saves the model with this method after each epoch.
def __init__(self, output_dir, **kwargs):
super().__init__()
self.output_dir = output_dir
def on_epoch_end(self, epoch, logs=None):
self.model.save_pretrained(self.output_dir)
|
# endregion
# region Command-line arguments
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class
into argparse arguments to be able to specify them on
the command line.
"""
train_file: Optional[str] = field(
default=None, metadata={"help": "A csv or a json file containing the training data."}
)
validation_file: Optional[str] = field(
default=None, metadata={"help": "A csv or a json file containing the validation data."}
)
test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})
max_seq_length: int = field(
default=128,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": "Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
max_predict_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of predict examples to this "
"value if set."
},
)
def __post_init__(self):
train_extension = self.train_file.split(".")[-1].lower() if self.train_file is not None else None
validation_extension = (
self.validation_file.split(".")[-1].lower() if self.validation_file is not None else None
)
test_extension = self.test_file.split(".")[-1].lower() if self.test_file is not None else None
extensions = {train_extension, validation_extension, test_extension}
extensions.discard(None)
assert len(extensions) != 0, "Need to supply at least one of --train_file, --validation_file or --test_file!"
assert len(extensions) == 1, "All input files should have the same file extension, either csv or json!"
assert "csv" in extensions or "json" in extensions, "Input files should have either .csv or .json extensions!"
self.input_file_extension = extensions.pop()
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
# endregion
def main():
# region Argument parsing
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
output_dir = Path(training_args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
# endregion
# region Checkpoints
# Detecting last checkpoint.
checkpoint = None
if len(os.listdir(training_args.output_dir)) > 0 and not training_args.overwrite_output_dir:
if (output_dir / CONFIG_NAME).is_file() and (output_dir / TF2_WEIGHTS_NAME).is_file():
checkpoint = output_dir
logger.info(
f"Checkpoint detected, resuming training from checkpoint in {training_args.output_dir}. To avoid this"
" behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
else:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to continue regardless."
)
# endregion
# region Logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO)
logger.info(f"Training/evaluation parameters {training_args}")
# endregion
# region Loading data
# For CSV/JSON files, this script will use the 'label' field as the label and the 'sentence1' and optionally
# 'sentence2' fields as inputs if they exist. If not, the first two fields not named label are used if at least two
# columns are provided. Note that the term 'sentence' can be slightly misleading, as these fields often contain
# more than a single grammatical sentence when the task requires it.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
data_files = {"train": data_args.train_file, "validation": data_args.validation_file, "test": data_args.test_file}
data_files = {key: file for key, file in data_files.items() if file is not None}
for key in data_files.keys():
logger.info(f"Loading a local file for {key}: {data_files[key]}")
if data_args.input_file_extension == "csv":
# Loading a dataset from local csv files
datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
else:
# Loading a dataset from local json files
datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# endregion
# region Label preprocessing
# If you've passed us a training set, we try to infer your labels from it
if "train" in datasets:
# By default we assume that if your label column looks like a float then you're doing regression,
# and if not then you're doing classification. This is something you may want to change!
is_regression = datasets["train"].features["label"].dtype in ["float32", "float64"]
if is_regression:
num_labels = 1
else:
# A useful fast method:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique
label_list = datasets["train"].unique("label")
label_list.sort() # Let's sort it for determinism
num_labels = len(label_list)
# If you haven't passed a training set, we read label info from the saved model (this happens later)
else:
num_labels = None
label_list = None
is_regression = None
# endregion
# region Load pretrained model and tokenizer
# Set seed before initializing model
set_seed(training_args.seed)
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if checkpoint is not None:
config_path = training_args.output_dir
elif model_args.config_name:
config_path = model_args.config_name
else:
config_path = model_args.model_name_or_path
if num_labels is not None:
config = AutoConfig.from_pretrained(
config_path,
num_labels=num_labels,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
config = AutoConfig.from_pretrained(
config_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
if checkpoint is None:
model_path = model_args.model_name_or_path
else:
model_path = checkpoint
model = TFAutoModelForSequenceClassification.from_pretrained(
model_path,
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# endregion
# region Optimizer, loss and compilation
optimizer = tf.keras.optimizers.Adam(
learning_rate=training_args.learning_rate,
beta_1=training_args.adam_beta1,
beta_2=training_args.adam_beta2,
epsilon=training_args.adam_epsilon,
clipnorm=training_args.max_grad_norm,
)
if is_regression:
loss = tf.keras.losses.MeanSquaredError()
metrics = []
else:
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
metrics = ["accuracy"]
model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
# endregion
# region Dataset preprocessing
# Again, we try to have some nice defaults but don't hesitate to tweak to your use case.
column_names = {col for cols in datasets.column_names.values() for col in cols}
non_label_column_names = [name for name in column_names if name != "label"]
if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names:
sentence1_key, sentence2_key = "sentence1", "sentence2"
elif "sentence1" in non_label_column_names:
sentence1_key, sentence2_key = "sentence1", None
else:
if len(non_label_column_names) >= 2:
sentence1_key, sentence2_key = non_label_column_names[:2]
else:
sentence1_key, sentence2_key = non_label_column_names[0], None
# Padding strategy
if data_args.pad_to_max_length:
padding = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
padding = False
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
# Ensure that our labels match the model's, if it has some pre-specified
if "train" in datasets:
if not is_regression and model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id:
label_name_to_id = model.config.label2id
if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)):
label_to_id = label_name_to_id # Use the model's labels
else:
logger.warning(
"Your model seems to have been trained with labels, but they don't match the dataset: "
f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}."
"\nIgnoring the model labels as a result."
)
label_to_id = {v: i for i, v in enumerate(label_list)}
elif not is_regression:
label_to_id = {v: i for i, v in enumerate(label_list)}
else:
label_to_id = None
# Now we've established our label2id, let's overwrite the model config with it.
model.config.label2id = label_to_id
if model.config.label2id is not None:
model.config.id2label = {id: label for label, id in label_to_id.items()}
else:
model.config.id2label = None
else:
label_to_id = model.config.label2id # Just load the data from the model
if "validation" in datasets and model.config.label2id is not None:
validation_label_list = datasets["validation"].unique("label")
for val_label in validation_label_list:
assert val_label in label_to_id, f"Label {val_label} is in the validation set but not the training set!"
def preprocess_function(examples):
# Tokenize the texts
args = (
(examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key])
)
result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True)
# Map labels to IDs
if model.config.label2id is not None and "label" in examples:
result["label"] = [(model.config.label2id[l] if l != -1 else -1) for l in examples["label"]]
return result
datasets = datasets.map(preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache)
if "train" in datasets:
train_dataset = datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
# Log a few random samples from the training set so we can see that it's working as expected:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
if "validation" in datasets:
eval_dataset = datasets["validation"]
if data_args.max_eval_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
if "test" in datasets:
predict_dataset = datasets["test"]
if data_args.max_predict_samples is not None:
predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
# endregion
# region Training
if "train" in datasets:
training_dataset = DataSequence(
train_dataset, non_label_column_names, batch_size=training_args.per_device_train_batch_size, labels=True
)
if "validation" in datasets:
eval_dataset = DataSequence(
eval_dataset, non_label_column_names, batch_size=training_args.per_device_eval_batch_size, labels=True
)
else:
eval_dataset = None
callbacks = [SavePretrainedCallback(output_dir=training_args.output_dir)]
model.fit(
training_dataset,
validation_data=eval_dataset,
epochs=int(training_args.num_train_epochs),
callbacks=callbacks,
)
elif "validation" in datasets:
# If there's a validation dataset but no training set, just evaluate the metrics
eval_dataset = DataSequence(
eval_dataset, non_label_column_names, batch_size=training_args.per_device_eval_batch_size, labels=True
)
logger.info("Computing metrics on validation data...")
if is_regression:
loss = model.evaluate(eval_dataset)
logger.info(f"Loss: {loss:.5f}")
else:
loss, accuracy = model.evaluate(eval_dataset)
logger.info(f"Loss: {loss:.5f}, Accuracy: {accuracy * 100:.4f}%")
# endregion
# region Prediction
if "test" in datasets:
logger.info("Doing predictions on Predict dataset...")
predict_dataset = DataSequence(
predict_dataset, non_label_column_names, batch_size=training_args.per_device_eval_batch_size, labels=False
)
predictions = model.predict(predict_dataset)["logits"]
predictions = np.squeeze(predictions) if is_regression else np.argmax(predictions, axis=1)
output_predict_file = os.path.join(training_args.output_dir, "predict_results.txt")
with open(output_predict_file, "w") as writer:
writer.write("index\tprediction\n")
for index, item in enumerate(predictions):
if is_regression:
writer.write(f"{index}\t{item:3.3f}\n")
else:
item = model.config.id2label[item]
writer.write(f"{index}\t{item}\n")
logger.info(f"Wrote predictions to {output_predict_file}!")
# endregion
if __name__ == "__main__":
main()
Commands.ts
/**
* Copyright (c) Tiny Technologies, Inc. All rights reserved.
* Licensed under the LGPL or a commercial license.
* For LGPL see License.txt in the project root for license information.
* For commercial licenses see https://www.tiny.cloud/
*/
import * as Dialog from '../ui/Dialog';
const register = (editor) => {
editor.addCommand('mceCodeEditor', () => {
Dialog.open(editor);
});
};
export {
register
};
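// Illustrative usage (not part of this file): a plugin entry point would call
// register(editor) during setup, after which the dialog can be opened with
// editor.execCommand('mceCodeEditor').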
databaseconnection.py
import sqlite3 as sqlite
class ConType:
query = 0
mutation = 1
batch = 2
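# ConType controls the connection lifecycle below: 'query' only opens a
# connection (no commit on exit), 'mutation' commits on exit, and 'batch'
# keeps a single shared connection open, suppressing per-statement connects
# and commits until the batch context exits.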
class DatabaseConnection:
is_batching = False
def __init__(self, database_object: 'MrDatabase', con_type: int=ConType.mutation):
self.database_object = database_object
self.con_type: int = con_type
def __enter__(self):
if self.con_type == ConType.query:
if not DatabaseConnection.is_batching:
self.connect()
elif self.con_type == ConType.mutation:
if not DatabaseConnection.is_batching:
self.connect()
elif self.con_type == ConType.batch:
self.connect()
DatabaseConnection.is_batching = True
def __exit__(self, *args):
if self.con_type == ConType.query:
if not self.is_batching:
self.close()
elif self.con_type == ConType.mutation:
if not self.is_batching:
self.commit()
self.close()
elif self.con_type == ConType.batch:
DatabaseConnection.is_batching = False
self.commit()
self.close()
def connect(self):
self.database_object.con = sqlite.connect(self.database_object.database_path)
self.database_object.cur = self.database_object.con.cursor()
def commit(self):
self.database_object.con.commit()
def close(self):
self.database_object.con.close()
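# Illustrative usage (assumes `db` is a MrDatabase instance with a valid
# database_path, and a hypothetical `person` table):
#   with DatabaseConnection(db, ConType.batch):
#       db.cur.execute("INSERT INTO person VALUES (1, 'Ada')")
#       db.cur.execute("INSERT INTO person VALUES (2, 'Alan')")
#   # both statements share one connection and are committed together on exit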