Columns:
file_name: string, 3–137 chars
prefix: string, 0–918k chars
suffix: string, 0–962k chars
middle: string, 0–812k chars
log.go
package log

import (
	"sync"

	"github.com/seerx/logo"
	"github.com/seerx/logo/logs"
)

var (
	instance logo.Logger
	once     sync.Once
)

// GetDefaultLogger lazily creates the package-level logger exactly once.
func GetDefaultLogger() logo.Logger {
	once.Do(func() {
		instance = logs.NewLogger()
	})
	return instance
}

func WithError(err error, hideCallStacks ...bool) *logo.ProxyLogger {
	return GetDefaultLogger().WithError(err, hideCallStacks...)
}

func WithData(data interface{}) *logo.ProxyLogger {
	return GetDefaultLogger().WithData(data)
}

func Debug(v ...interface{}) { GetDefaultLogger().Debug(v...) }

func Debugf(format string, v ...interface{}) { GetDefaultLogger().Debugf(format, v...) }

func Info(v ...interface{}) { GetDefaultLogger().Info(v...) }

func Infof(format string, v ...interface{}) { GetDefaultLogger().Infof(format, v...) }

func Warn(v ...interface{}) { GetDefaultLogger().Warn(v...) }

func Warnf(format string, v ...interface{}) { GetDefaultLogger().Warnf(format, v...) }

func Error(v ...interface{}) { GetDefaultLogger().Error(v...) }

func Errorf(format string, v ...interface{}) { GetDefaultLogger().Errorf(format, v...) }

func SetLevel(level logo.LogLevel) { GetDefaultLogger().SetLevel(level) }

func SetLogErrorCallStacks(show bool) { GetDefaultLogger().SetLogErrorCallStacks(show) }

func SetFormatter(fmt logo.Formatter) { GetDefaultLogger().SetFormatter(fmt) }

func SetColorLog(color bool) { GetDefaultLogger().SetColorLog(color) }

func SetLogFileLine(log bool) { GetDefaultLogger().SetLogFileLine(log) }
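// Usage sketch (not part of log.go): how a caller might exercise the
// package-level wrappers above. The import path and the DEBUG level constant
// are assumptions; check the seerx/logo package for its actual exported names.
package main

import (
	"errors"

	log "example.com/yourapp/log" // hypothetical module path for the package above
	"github.com/seerx/logo"
)

func main() {
	log.SetLevel(logo.DEBUG) // assumed constant of type logo.LogLevel
	log.Infof("listening on :%d", 8080)
	log.WithError(errors.New("disk full")).Error("write failed")
}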
PhasedHaplotypeParser.py
'''
@author: Roman Briskine, University of Minnesota
'''
import os.path
import re

# Column indices in the input variant table.
F_VARIANT = 1
F_CLASS = 2
F_POS = 3
F_REF_ALLELE = 4
F_VAR_ALLELE = 5
F_EXON = 9
F_ACC_OFFSET = 13


class PhasedHaplotypeParser():
    def __init__(self, accessionN=3, accessionColN=7, delim='\t'):
        self.accessionN = accessionN
        self.accessionColN = accessionColN
        self.delim = delim
        self.markers = []
        self.haplotypes = []
        # Row 0 holds the reference; rows 1..accessionN hold the accessions.
        for k in range(self.accessionN + 1):
            famId = "F%03d" % k
            self.haplotypes.append([famId])
        self.nucleotides = {"A": 1, "C": 2, "G": 3, "T": 4}

    def parse(self, fPathIn, freqThreshold, fPathPhased=None, fPathMarker=None):
        print("Parsing...")
        if fPathPhased is None:
            fPathPhased = fPathIn + ".haps"
        if fPathMarker is None:
            fPathMarker = fPathIn + ".info"
        with open(fPathIn, 'r') as fIn:
            line = fIn.readline()
            hdr = line.split(self.delim)
            self.haplotypes[0].append("REF")
            for k in range(self.accessionN):
                accNameIdx = F_ACC_OFFSET + k * self.accessionColN
                self.haplotypes[k + 1].append(hdr[accNameIdx])
            prevPos = 0
            line = fIn.readline()
            while line != "":
                fields = line.split(self.delim)
                # Keep exonic SNPs whose reference allele is a plain nucleotide.
                if fields[F_CLASS] == "S" and fields[F_EXON] != '' and fields[F_REF_ALLELE] in self.nucleotides:
                    if fields[F_POS] != prevPos:
                        self.markers.append([fields[F_VARIANT] + ":" + fields[F_EXON], fields[F_POS]])
                        nId = self.nucleotides[fields[F_REF_ALLELE]]
                        self.haplotypes[0].append(nId)
                        for k in range(self.accessionN):
                            freqIdx = F_ACC_OFFSET + k * self.accessionColN + 3
                            if float(fields[freqIdx]) > freqThreshold:
                                nId = self.nucleotides[fields[F_VAR_ALLELE].upper()]
                                self.haplotypes[k + 1].append(nId)
                            else:
                                nId = self.nucleotides[fields[F_REF_ALLELE]]
                                self.haplotypes[k + 1].append(nId)
                    # else:
                    #     for k in range(self.accessionN):
                    #         freqIdx = F_ACC_OFFSET + k * self.accessionColN + 3
                    #         if float(fields[freqIdx]) > freqThreshold:
                    #             self.haplotypes[k + 1][-1] = self.nucleotides[fields[F_VAR_ALLELE].upper()]
                    prevPos = fields[F_POS]
                line = fIn.readline()
        with open(fPathMarker, 'w') as fMarker:
            for marker in self.markers:
                fMarker.write(self.delim.join(marker))
                fMarker.write('\n')
        with open(fPathPhased, 'w') as fPhased:
            for accession in self.haplotypes:
                # Each accession is written twice: one line per haplotype copy.
                fPhased.write(self.delim.join(map(str, accession)) + '\n')
                fPhased.write(self.delim.join(map(str, accession)) + '\n')


if __name__ == '__main__':
    fPathIn = "variant_table.10_30.txt"
    freqThreshold = 0.85
    phParser = PhasedHaplotypeParser()
    phParser.parse(fPathIn, freqThreshold)
clickable_list.go
package decredmaterial

import (
	"gioui.org/layout"
	"gioui.org/unit"
	"gioui.org/widget"
)

type ClickableList struct {
	layout.List
	theme         *Theme
	clickables    []*widget.Clickable
	selectedItem  int
	DividerHeight unit.Value
}

func (t *Theme) NewClickableList(axis layout.Axis) *ClickableList {
	return &ClickableList{
		theme:        t,
		List:         layout.List{Axis: axis},
		selectedItem: -1,
	}
}

// ItemClicked reports whether an item was clicked since the last call and,
// if so, its index; the selection is reset once it has been read.
func (cl *ClickableList) ItemClicked() (bool, int) {
	defer func() {
		cl.selectedItem = -1
	}()
	return cl.selectedItem != -1, cl.selectedItem
}

func (cl *ClickableList) handleClickables(count int) {
	if len(cl.clickables) != count {
		cl.clickables = make([]*widget.Clickable, count)
		for i := 0; i < count; i++ {
			cl.clickables[i] = new(widget.Clickable)
		}
	}
	for index, clickable := range cl.clickables {
		for clickable.Clicked() {
			cl.selectedItem = index
		}
	}
}

func (cl *ClickableList) Layout(gtx layout.Context, count int, w layout.ListElement) layout.Dimensions {
	cl.handleClickables(count)
	return cl.List.Layout(gtx, count, func(gtx layout.Context, i int) layout.Dimensions {
		row := Clickable(gtx, cl.clickables[i], func(gtx layout.Context) layout.Dimensions {
			return w(gtx, i)
		})
		// add divider to all rows except last
		if i < (count-1) && cl.DividerHeight.V > 0 {
			return layout.Flex{Axis: layout.Vertical}.Layout(gtx,
				layout.Rigid(func(gtx layout.Context) layout.Dimensions {
					return row
				}),
				layout.Rigid(func(gtx layout.Context) layout.Dimensions {
					gtx.Constraints.Min.Y += gtx.Px(cl.DividerHeight)
					return layout.Dimensions{Size: gtx.Constraints.Min}
				}),
			)
		}
		return row
	})
}
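// Usage sketch (not part of clickable_list.go): a page in the same package
// that renders rows through a ClickableList (created with
// theme.NewClickableList(layout.Vertical)) and reacts to clicks during its
// event pass. The page type, its fields, and theme.Body1 are illustrative
// assumptions, not part of this file's API.
type examplePage struct {
	theme *Theme
	list  *ClickableList
	rows  []string
}

func (pg *examplePage) layoutRows(gtx layout.Context) layout.Dimensions {
	return pg.list.Layout(gtx, len(pg.rows), func(gtx layout.Context, i int) layout.Dimensions {
		return pg.theme.Body1(pg.rows[i]).Layout(gtx)
	})
}

func (pg *examplePage) handle() {
	// ItemClicked resets the selection after it is read, so poll it once per
	// event cycle.
	if clicked, index := pg.list.ItemClicked(); clicked {
		println("row clicked:", index)
	}
}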
unicorn.js
const {OFF, ERROR, WARN} = require('../constants');

module.exports = {
  rules: {
    /*
     * Reset all rules to the "no error" state, for a smooth integration.
     */
    // url: https://github.com/sindresorhus/eslint-plugin-unicorn/blob/master/docs/rules/escape-case.md
    'unicorn/escape-case': OFF,
    // url: https://github.com/sindresorhus/eslint-plugin-unicorn/blob/master/docs/rules/new-for-builtins.md
    'unicorn/new-for-builtins': OFF,
    // url: https://github.com/sindresorhus/eslint-plugin-unicorn/blob/master/docs/rules/no-array-instanceof.md
    'unicorn/no-array-instanceof': OFF,
    // url: https://github.com/sindresorhus/eslint-plugin-unicorn/blob/master/docs/rules/no-for-loop.md
    'unicorn/no-for-loop': OFF,
    // url: https://github.com/sindresorhus/eslint-plugin-unicorn/blob/master/docs/rules/no-hex-escape.md
    'unicorn/no-hex-escape': OFF,
    // url: https://github.com/sindresorhus/eslint-plugin-unicorn/blob/master/docs/rules/no-unreadable-array-destructuring.md
    'unicorn/no-unreadable-array-destructuring': OFF,
    // url: https://github.com/sindresorhus/eslint-plugin-unicorn/blob/master/docs/rules/no-zero-fractions.md
    'unicorn/no-zero-fractions': OFF,
    // url: https://github.com/sindresorhus/eslint-plugin-unicorn/blob/master/docs/rules/prefer-add-event-listener.md
    'unicorn/prefer-add-event-listener': OFF,
    // url: https://github.com/sindresorhus/eslint-plugin-unicorn/blob/master/docs/rules/prefer-starts-ends-with.md
    'unicorn/prefer-starts-ends-with': OFF,
    // url: https://github.com/sindresorhus/eslint-plugin-unicorn/blob/master/docs/rules/regex-shorthand.md
    'unicorn/regex-shorthand': OFF,
    // url: https://github.com/sindresorhus/eslint-plugin-unicorn/blob/master/docs/rules/throw-new-error.md
    'unicorn/throw-new-error': OFF,
    // url: https://github.com/sindresorhus/eslint-plugin-unicorn/blob/master/docs/rules/number-literal-case.md
    'unicorn/number-literal-case': OFF,
    // url: https://github.com/sindresorhus/eslint-plugin-unicorn/blob/master/docs/rules/no-fn-reference-in-iterator.md
    'unicorn/no-fn-reference-in-iterator': OFF,
    // url: https://github.com/sindresorhus/eslint-plugin-unicorn/blob/master/docs/rules/filename-case.md
    'unicorn/filename-case': OFF,
    // url: https://github.com/sindresorhus/eslint-plugin-unicorn/blob/master/docs/rules/import-index.md
    'unicorn/import-index': OFF,
    // url: https://github.com/sindresorhus/eslint-plugin-unicorn/blob/master/docs/rules/no-console-spaces.md
    'unicorn/no-console-spaces': OFF,
    // url: https://github.com/sindresorhus/eslint-plugin-unicorn/blob/master/docs/rules/explicit-length-check.md
    'unicorn/explicit-length-check': OFF,
    // url: https://github.com/sindresorhus/eslint-plugin-unicorn/blob/master/docs/rules/prefer-flat-map.md
    'unicorn/prefer-flat-map': OFF,
    // url: https://github.com/sindresorhus/eslint-plugin-unicorn/blob/master/docs/rules/prefer-event-key.md
    'unicorn/prefer-event-key': OFF,
    // url: https://github.com/sindresorhus/eslint-plugin-unicorn/blob/master/docs/rules/catch-error-name.md
    'unicorn/catch-error-name': OFF,
    // url: https://github.com/sindresorhus/eslint-plugin-unicorn/blob/master/docs/rules/custom-error-definition.md
    'unicorn/custom-error-definition': OFF,
    // url: https://github.com/sindresorhus/eslint-plugin-unicorn/blob/master/docs/rules/error-message.md
    'unicorn/error-message': OFF,
    // url: https://github.com/sindresorhus/eslint-plugin-unicorn/blob/master/docs/rules/no-abusive-eslint-disable.md
    'unicorn/no-abusive-eslint-disable': OFF,
    // url: https://github.com/sindresorhus/eslint-plugin-unicorn/blob/master/docs/rules/no-new-buffer.md
    'unicorn/no-new-buffer': OFF,
    // url: https://github.com/sindresorhus/eslint-plugin-unicorn/blob/master/docs/rules/no-process-exit.md
    'unicorn/no-process-exit': OFF,
    // url: https://github.com/sindresorhus/eslint-plugin-unicorn/blob/master/docs/rules/no-unsafe-regex.md
    'unicorn/no-unsafe-regex': OFF,
    // url: https://github.com/sindresorhus/eslint-plugin-unicorn/blob/master/docs/rules/no-unused-properties.md
    'unicorn/no-unused-properties': OFF,
    // url: https://github.com/sindresorhus/eslint-plugin-unicorn/blob/master/docs/rules/prefer-exponentiation-operator.md
    'unicorn/prefer-exponentiation-operator': OFF,
    // url: https://github.com/sindresorhus/eslint-plugin-unicorn/blob/master/docs/rules/prefer-node-append.md
    'unicorn/prefer-node-append': OFF,
    // url: https://github.com/sindresorhus/eslint-plugin-unicorn/blob/master/docs/rules/prefer-node-remove.md
    'unicorn/prefer-node-remove': OFF,
    // url: https://github.com/sindresorhus/eslint-plugin-unicorn/blob/master/docs/rules/prefer-query-selector.md
    'unicorn/prefer-query-selector': OFF,
    // url: https://github.com/sindresorhus/eslint-plugin-unicorn/blob/master/docs/rules/prefer-spread.md
    'unicorn/prefer-spread': OFF,
    // url: https://github.com/sindresorhus/eslint-plugin-unicorn/blob/master/docs/rules/prefer-text-content.md
    'unicorn/prefer-text-content': OFF,
    // url: https://github.com/sindresorhus/eslint-plugin-unicorn/blob/master/docs/rules/prefer-type-error.md
    'unicorn/prefer-type-error': OFF,
    // url: https://github.com/sindresorhus/eslint-plugin-unicorn/blob/master/docs/rules/prevent-abbreviations.md
    'unicorn/prevent-abbreviations': OFF,
    // url: https://github.com/sindresorhus/eslint-plugin-unicorn/blob/master/docs/rules/no-keyword-prefix.md
    'unicorn/no-keyword-prefix': OFF,
  },
};
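// Consumption sketch (not part of unicorn.js): a project-level .eslintrc.js
// might extend this ruleset fragment; the relative path is illustrative.
module.exports = {
  extends: ['./rules/unicorn.js'],
};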
lib.rs
use serde::{Serialize, Deserialize};

pub mod countries;
pub mod processor;

#[derive(Serialize, Deserialize)]
pub struct Node {
    pub name: String,
    pub place: String,
    pub province: String,
    pub lat: f64,
    pub lon: f64,
}

#[derive(Serialize)]
pub struct Place {
    pub name: &'static str,
    pub lat: f64,
    pub lon: f64,
}
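// Round-trip sketch (not part of lib.rs), e.g. an examples/ file: Node derives
// both Serialize and Deserialize, so it survives a serde_json round trip
// (Place only derives Serialize, so it can be written but not read back).
// The crate name `geo_nodes`, the serde_json dependency, and the sample
// values are illustrative assumptions.
use geo_nodes::Node;

fn main() -> serde_json::Result<()> {
    let node = Node {
        name: "Halifax".to_string(),
        place: "city".to_string(),
        province: "Nova Scotia".to_string(),
        lat: 44.6488,
        lon: -63.5752,
    };
    let json = serde_json::to_string(&node)?;
    let back: Node = serde_json::from_str(&json)?;
    assert_eq!(back.name, node.name);
    Ok(())
}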
operations.rs
#![doc = "generated by AutoRust 0.1.0"] #![allow(unused_mut)] #![allow(unused_variables)] #![allow(unused_imports)] use crate::models::*; use reqwest::StatusCode; use snafu::{ResultExt, Snafu}; pub mod operations { use crate::models::*; use reqwest::StatusCode; use snafu::{ResultExt, Snafu}; pub async fn list(operation_config: &crate::OperationConfig) -> std::result::Result<ComputeOperationListResult, list::Error> { let client = &operation_config.client; let uri_str = &format!("{}/providers/Microsoft.Compute/operations", &operation_config.base_path,); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list::BuildRequestError)?; let rsp = client.execute(req).await.context(list::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; let rsp_value: ComputeOperationListResult = serde_json::from_slice(&body).context(list::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; list::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod list { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } } pub mod availability_sets { use crate::models::*; use reqwest::StatusCode; use snafu::{ResultExt, Snafu}; pub async fn get( operation_config: &crate::OperationConfig, resource_group_name: &str, availability_set_name: &str, subscription_id: &str, ) -> std::result::Result<AvailabilitySet, get::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/availabilitySets/{}", &operation_config.base_path, subscription_id, resource_group_name, availability_set_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(get::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(get::BuildRequestError)?; let rsp = client.execute(req).await.context(get::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; let rsp_value: AvailabilitySet = serde_json::from_slice(&body).context(get::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; get::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod get { use crate::{models, models::*}; use 
reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn create_or_update( operation_config: &crate::OperationConfig, resource_group_name: &str, availability_set_name: &str, parameters: &AvailabilitySet, subscription_id: &str, ) -> std::result::Result<AvailabilitySet, create_or_update::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/availabilitySets/{}", &operation_config.base_path, subscription_id, resource_group_name, availability_set_name ); let mut req_builder = client.put(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(create_or_update::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(parameters); let req = req_builder.build().context(create_or_update::BuildRequestError)?; let rsp = client.execute(req).await.context(create_or_update::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: AvailabilitySet = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; create_or_update::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod create_or_update { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn update( operation_config: &crate::OperationConfig, resource_group_name: &str, availability_set_name: &str, parameters: &AvailabilitySetUpdate, subscription_id: &str, ) -> std::result::Result<AvailabilitySet, update::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/availabilitySets/{}", &operation_config.base_path, subscription_id, resource_group_name, availability_set_name ); let mut req_builder = client.patch(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(update::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(parameters); let req = req_builder.build().context(update::BuildRequestError)?; let rsp = 
client.execute(req).await.context(update::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?; let rsp_value: AvailabilitySet = serde_json::from_slice(&body).context(update::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?; update::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod update { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn delete( operation_config: &crate::OperationConfig, resource_group_name: &str, availability_set_name: &str, subscription_id: &str, ) -> std::result::Result<delete::Response, delete::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/availabilitySets/{}", &operation_config.base_path, subscription_id, resource_group_name, availability_set_name ); let mut req_builder = client.delete(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(delete::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(delete::BuildRequestError)?; let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(delete::Response::Ok200), StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?; delete::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod delete { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, NoContent204, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn list_by_subscription( operation_config: &crate::OperationConfig, subscription_id: &str, expand: Option<&str>, ) -> std::result::Result<AvailabilitySetListResult, list_by_subscription::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/providers/Microsoft.Compute/availabilitySets", &operation_config.base_path, subscription_id ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list_by_subscription::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); 
} req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); if let Some(expand) = expand { req_builder = req_builder.query(&[("$expand", expand)]); } let req = req_builder.build().context(list_by_subscription::BuildRequestError)?; let rsp = client.execute(req).await.context(list_by_subscription::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list_by_subscription::ResponseBytesError)?; let rsp_value: AvailabilitySetListResult = serde_json::from_slice(&body).context(list_by_subscription::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list_by_subscription::ResponseBytesError)?; list_by_subscription::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod list_by_subscription { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn list( operation_config: &crate::OperationConfig, resource_group_name: &str, subscription_id: &str, ) -> std::result::Result<AvailabilitySetListResult, list::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/availabilitySets", &operation_config.base_path, subscription_id, resource_group_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list::BuildRequestError)?; let rsp = client.execute(req).await.context(list::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; let rsp_value: AvailabilitySetListResult = serde_json::from_slice(&body).context(list::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; list::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod list { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn list_available_sizes( operation_config: &crate::OperationConfig, resource_group_name: &str, availability_set_name: &str, subscription_id: &str, ) -> std::result::Result<VirtualMachineSizeListResult, list_available_sizes::Error> { let client = &operation_config.client; let uri_str = &format!( 
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/availabilitySets/{}/vmSizes", &operation_config.base_path, subscription_id, resource_group_name, availability_set_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list_available_sizes::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list_available_sizes::BuildRequestError)?; let rsp = client.execute(req).await.context(list_available_sizes::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list_available_sizes::ResponseBytesError)?; let rsp_value: VirtualMachineSizeListResult = serde_json::from_slice(&body).context(list_available_sizes::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list_available_sizes::ResponseBytesError)?; list_available_sizes::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod list_available_sizes { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } } pub mod proximity_placement_groups { use crate::models::*; use reqwest::StatusCode; use snafu::{ResultExt, Snafu}; pub async fn get( operation_config: &crate::OperationConfig, resource_group_name: &str, proximity_placement_group_name: &str, subscription_id: &str, ) -> std::result::Result<ProximityPlacementGroup, get::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/proximityPlacementGroups/{}", &operation_config.base_path, subscription_id, resource_group_name, proximity_placement_group_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(get::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(get::BuildRequestError)?; let rsp = client.execute(req).await.context(get::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; let rsp_value: ProximityPlacementGroup = serde_json::from_slice(&body).context(get::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; get::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod get { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, 
BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn create_or_update( operation_config: &crate::OperationConfig, resource_group_name: &str, proximity_placement_group_name: &str, parameters: &ProximityPlacementGroup, subscription_id: &str, ) -> std::result::Result<create_or_update::Response, create_or_update::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/proximityPlacementGroups/{}", &operation_config.base_path, subscription_id, resource_group_name, proximity_placement_group_name ); let mut req_builder = client.put(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(create_or_update::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(parameters); let req = req_builder.build().context(create_or_update::BuildRequestError)?; let rsp = client.execute(req).await.context(create_or_update::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: ProximityPlacementGroup = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; Ok(create_or_update::Response::Ok200(rsp_value)) } StatusCode::CREATED => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: ProximityPlacementGroup = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; Ok(create_or_update::Response::Created201(rsp_value)) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; create_or_update::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod create_or_update { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(ProximityPlacementGroup), Created201(ProximityPlacementGroup), } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn update( operation_config: &crate::OperationConfig, resource_group_name: &str, proximity_placement_group_name: &str, parameters: &ProximityPlacementGroupUpdate, subscription_id: &str, ) -> std::result::Result<ProximityPlacementGroup, update::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/proximityPlacementGroups/{}", &operation_config.base_path, subscription_id, resource_group_name, proximity_placement_group_name ); let mut req_builder = client.patch(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential 
.get_token(&operation_config.token_credential_resource) .await .context(update::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(parameters); let req = req_builder.build().context(update::BuildRequestError)?; let rsp = client.execute(req).await.context(update::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?; let rsp_value: ProximityPlacementGroup = serde_json::from_slice(&body).context(update::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?; update::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod update { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn delete( operation_config: &crate::OperationConfig, resource_group_name: &str, proximity_placement_group_name: &str, subscription_id: &str, ) -> std::result::Result<(), delete::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/proximityPlacementGroups/{}", &operation_config.base_path, subscription_id, resource_group_name, proximity_placement_group_name ); let mut req_builder = client.delete(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(delete::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(delete::BuildRequestError)?; let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(()), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?; delete::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod delete { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn list_by_subscription( operation_config: &crate::OperationConfig, subscription_id: &str, ) -> std::result::Result<ProximityPlacementGroupListResult, list_by_subscription::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/providers/Microsoft.Compute/proximityPlacementGroups", &operation_config.base_path, subscription_id ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = 
&operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list_by_subscription::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list_by_subscription::BuildRequestError)?; let rsp = client.execute(req).await.context(list_by_subscription::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list_by_subscription::ResponseBytesError)?; let rsp_value: ProximityPlacementGroupListResult = serde_json::from_slice(&body).context(list_by_subscription::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list_by_subscription::ResponseBytesError)?; list_by_subscription::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod list_by_subscription { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn list_by_resource_group( operation_config: &crate::OperationConfig, resource_group_name: &str, subscription_id: &str, ) -> std::result::Result<ProximityPlacementGroupListResult, list_by_resource_group::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/proximityPlacementGroups", &operation_config.base_path, subscription_id, resource_group_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list_by_resource_group::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list_by_resource_group::BuildRequestError)?; let rsp = client.execute(req).await.context(list_by_resource_group::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list_by_resource_group::ResponseBytesError)?; let rsp_value: ProximityPlacementGroupListResult = serde_json::from_slice(&body).context(list_by_resource_group::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list_by_resource_group::ResponseBytesError)?; list_by_resource_group::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod list_by_resource_group { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: 
azure_core::errors::AzureError }, } } } pub mod virtual_machine_extension_images { use crate::models::*; use reqwest::StatusCode; use snafu::{ResultExt, Snafu}; pub async fn get( operation_config: &crate::OperationConfig, location: &str, publisher_name: &str, type_: &str, version: &str, subscription_id: &str, ) -> std::result::Result<VirtualMachineExtensionImage, get::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/publishers/{}/artifacttypes/vmextension/types/{}/versions/{}", &operation_config.base_path, subscription_id, location, publisher_name, type_, version ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(get::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(get::BuildRequestError)?; let rsp = client.execute(req).await.context(get::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; let rsp_value: VirtualMachineExtensionImage = serde_json::from_slice(&body).context(get::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; get::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod get { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn list_types( operation_config: &crate::OperationConfig, location: &str, publisher_name: &str, subscription_id: &str, ) -> std::result::Result<Vec<VirtualMachineExtensionImage>, list_types::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/publishers/{}/artifacttypes/vmextension/types", &operation_config.base_path, subscription_id, location, publisher_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list_types::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list_types::BuildRequestError)?; let rsp = client.execute(req).await.context(list_types::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list_types::ResponseBytesError)?; let rsp_value: Vec<VirtualMachineExtensionImage> = serde_json::from_slice(&body).context(list_types::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list_types::ResponseBytesError)?; list_types::UnexpectedResponse { status_code, body: body 
}.fail() } } } pub mod list_types { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn list_versions( operation_config: &crate::OperationConfig, location: &str, publisher_name: &str, type_: &str, filter: Option<&str>, top: Option<i32>, orderby: Option<&str>, subscription_id: &str, ) -> std::result::Result<Vec<VirtualMachineExtensionImage>, list_versions::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/publishers/{}/artifacttypes/vmextension/types/{}/versions", &operation_config.base_path, subscription_id, location, publisher_name, type_ ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list_versions::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); if let Some(filter) = filter { req_builder = req_builder.query(&[("$filter", filter)]); } if let Some(top) = top { req_builder = req_builder.query(&[("$top", top)]); } if let Some(orderby) = orderby { req_builder = req_builder.query(&[("$orderby", orderby)]); } let req = req_builder.build().context(list_versions::BuildRequestError)?; let rsp = client.execute(req).await.context(list_versions::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list_versions::ResponseBytesError)?; let rsp_value: Vec<VirtualMachineExtensionImage> = serde_json::from_slice(&body).context(list_versions::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list_versions::ResponseBytesError)?; list_versions::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod list_versions { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } } pub mod virtual_machine_extensions { use crate::models::*; use reqwest::StatusCode; use snafu::{ResultExt, Snafu}; pub async fn get( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_name: &str, vm_extension_name: &str, expand: Option<&str>, subscription_id: &str, ) -> std::result::Result<VirtualMachineExtension, get::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/extensions/{}", &operation_config.base_path, subscription_id, resource_group_name, vm_name, vm_extension_name ); let mut req_builder = client.get(uri_str); if let 
Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(get::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); if let Some(expand) = expand { req_builder = req_builder.query(&[("$expand", expand)]); } let req = req_builder.build().context(get::BuildRequestError)?; let rsp = client.execute(req).await.context(get::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; let rsp_value: VirtualMachineExtension = serde_json::from_slice(&body).context(get::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; get::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod get { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn create_or_update( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_name: &str, vm_extension_name: &str, extension_parameters: &VirtualMachineExtension, subscription_id: &str, ) -> std::result::Result<create_or_update::Response, create_or_update::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/extensions/{}", &operation_config.base_path, subscription_id, resource_group_name, vm_name, vm_extension_name ); let mut req_builder = client.put(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(create_or_update::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(extension_parameters); let req = req_builder.build().context(create_or_update::BuildRequestError)?; let rsp = client.execute(req).await.context(create_or_update::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: VirtualMachineExtension = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; Ok(create_or_update::Response::Ok200(rsp_value)) } StatusCode::CREATED => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: VirtualMachineExtension = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; Ok(create_or_update::Response::Created201(rsp_value)) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; create_or_update::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod create_or_update { use crate::{models, models::*}; use reqwest::StatusCode; use 
snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(VirtualMachineExtension), Created201(VirtualMachineExtension), } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn update( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_name: &str, vm_extension_name: &str, extension_parameters: &VirtualMachineExtensionUpdate, subscription_id: &str, ) -> std::result::Result<VirtualMachineExtension, update::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/extensions/{}", &operation_config.base_path, subscription_id, resource_group_name, vm_name, vm_extension_name ); let mut req_builder = client.patch(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(update::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(extension_parameters); let req = req_builder.build().context(update::BuildRequestError)?; let rsp = client.execute(req).await.context(update::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?; let rsp_value: VirtualMachineExtension = serde_json::from_slice(&body).context(update::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?; update::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod update { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn delete( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_name: &str, vm_extension_name: &str, subscription_id: &str, ) -> std::result::Result<delete::Response, delete::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/extensions/{}", &operation_config.base_path, subscription_id, resource_group_name, vm_name, vm_extension_name ); let mut req_builder = client.delete(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(delete::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(delete::BuildRequestError)?; let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(delete::Response::Ok200), StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?; delete::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod delete { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn list( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_name: &str, expand: Option<&str>, subscription_id: &str, ) -> std::result::Result<VirtualMachineExtensionsListResult, list::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/extensions", &operation_config.base_path, subscription_id, resource_group_name, vm_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); if let Some(expand) = expand { req_builder = req_builder.query(&[("$expand", expand)]); } let req = req_builder.build().context(list::BuildRequestError)?; let rsp = client.execute(req).await.context(list::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; let rsp_value: VirtualMachineExtensionsListResult = serde_json::from_slice(&body).context(list::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; list::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod list { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } } pub mod virtual_machine_images { use crate::models::*; use reqwest::StatusCode; use snafu::{ResultExt, Snafu}; pub async fn get( operation_config: &crate::OperationConfig, location: &str, publisher_name: &str, offer: &str, skus: &str, version: &str, subscription_id: &str, ) -> std::result::Result<VirtualMachineImage, get::Error> { let client = &operation_config.client; let uri_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/publishers/{}/artifacttypes/vmimage/offers/{}/skus/{}/versions/{}", &operation_config.base_path, subscription_id, location, publisher_name, offer, skus, version ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(get::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(get::BuildRequestError)?; let rsp = client.execute(req).await.context(get::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; let rsp_value: VirtualMachineImage = serde_json::from_slice(&body).context(get::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; get::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod get { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn list( operation_config: &crate::OperationConfig, location: &str, publisher_name: &str, offer: &str, skus: &str, expand: Option<&str>, top: Option<i32>, orderby: Option<&str>, subscription_id: &str, ) -> std::result::Result<Vec<VirtualMachineImageResource>, list::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/publishers/{}/artifacttypes/vmimage/offers/{}/skus/{}/versions", &operation_config.base_path, subscription_id, location, publisher_name, offer, skus ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); if let Some(expand) = expand { req_builder = req_builder.query(&[("$expand", expand)]); } if let Some(top) = top { req_builder = req_builder.query(&[("$top", top)]); } if let Some(orderby) = orderby { req_builder = req_builder.query(&[("$orderby", orderby)]); } let req = req_builder.build().context(list::BuildRequestError)?; let rsp = client.execute(req).await.context(list::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; let rsp_value: Vec<VirtualMachineImageResource> = serde_json::from_slice(&body).context(list::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; list::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod list { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu;
#[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn list_offers( operation_config: &crate::OperationConfig, location: &str, publisher_name: &str, subscription_id: &str, ) -> std::result::Result<Vec<VirtualMachineImageResource>, list_offers::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/publishers/{}/artifacttypes/vmimage/offers", &operation_config.base_path, subscription_id, location, publisher_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list_offers::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list_offers::BuildRequestError)?; let rsp = client.execute(req).await.context(list_offers::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list_offers::ResponseBytesError)?; let rsp_value: Vec<VirtualMachineImageResource> = serde_json::from_slice(&body).context(list_offers::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list_offers::ResponseBytesError)?; list_offers::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod list_offers { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn list_publishers( operation_config: &crate::OperationConfig, location: &str, subscription_id: &str, ) -> std::result::Result<Vec<VirtualMachineImageResource>, list_publishers::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/publishers", &operation_config.base_path, subscription_id, location ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list_publishers::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list_publishers::BuildRequestError)?; let rsp = client.execute(req).await.context(list_publishers::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list_publishers::ResponseBytesError)?; let rsp_value: Vec<VirtualMachineImageResource> = 
serde_json::from_slice(&body).context(list_publishers::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list_publishers::ResponseBytesError)?; list_publishers::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod list_publishers { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn list_skus( operation_config: &crate::OperationConfig, location: &str, publisher_name: &str, offer: &str, subscription_id: &str, ) -> std::result::Result<Vec<VirtualMachineImageResource>, list_skus::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/publishers/{}/artifacttypes/vmimage/offers/{}/skus", &operation_config.base_path, subscription_id, location, publisher_name, offer ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list_skus::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list_skus::BuildRequestError)?; let rsp = client.execute(req).await.context(list_skus::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list_skus::ResponseBytesError)?; let rsp_value: Vec<VirtualMachineImageResource> = serde_json::from_slice(&body).context(list_skus::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list_skus::ResponseBytesError)?; list_skus::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod list_skus { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } } pub mod usage { use crate::models::*; use reqwest::StatusCode; use snafu::{ResultExt, Snafu}; pub async fn list( operation_config: &crate::OperationConfig, location: &str, subscription_id: &str, ) -> std::result::Result<ListUsagesResult, list::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/usages", &operation_config.base_path, subscription_id, location ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } 
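// The token credential on OperationConfig is optional; when one is configured, an AAD token is fetched and attached as a Bearer header. The api-version query parameter added below is sent on every request regardless.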
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list::BuildRequestError)?; let rsp = client.execute(req).await.context(list::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; let rsp_value: ListUsagesResult = serde_json::from_slice(&body).context(list::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; list::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod list { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } } pub mod virtual_machines { use crate::models::*; use reqwest::StatusCode; use snafu::{ResultExt, Snafu}; pub async fn list_by_location( operation_config: &crate::OperationConfig, location: &str, subscription_id: &str, ) -> std::result::Result<VirtualMachineListResult, list_by_location::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/virtualMachines", &operation_config.base_path, subscription_id, location ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list_by_location::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list_by_location::BuildRequestError)?; let rsp = client.execute(req).await.context(list_by_location::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list_by_location::ResponseBytesError)?; let rsp_value: VirtualMachineListResult = serde_json::from_slice(&body).context(list_by_location::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list_by_location::ResponseBytesError)?; list_by_location::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod list_by_location { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn capture( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_name: &str, parameters: &VirtualMachineCaptureParameters, subscription_id: &str, ) -> std::result::Result<capture::Response, capture::Error> { let client = &operation_config.client; let uri_str = &format!( 
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/capture", &operation_config.base_path, subscription_id, resource_group_name, vm_name ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(capture::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(parameters); let req = req_builder.build().context(capture::BuildRequestError)?; let rsp = client.execute(req).await.context(capture::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(capture::ResponseBytesError)?; let rsp_value: VirtualMachineCaptureResult = serde_json::from_slice(&body).context(capture::DeserializeError { body })?; Ok(capture::Response::Ok200(rsp_value)) } StatusCode::ACCEPTED => Ok(capture::Response::Accepted202), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(capture::ResponseBytesError)?; capture::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod capture { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(VirtualMachineCaptureResult), Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn get( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_name: &str, expand: Option<&str>, subscription_id: &str, ) -> std::result::Result<VirtualMachine, get::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}", &operation_config.base_path, subscription_id, resource_group_name, vm_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(get::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); if let Some(expand) = expand { req_builder = req_builder.query(&[("$expand", expand)]); } let req = req_builder.build().context(get::BuildRequestError)?; let rsp = client.execute(req).await.context(get::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; let rsp_value: VirtualMachine = serde_json::from_slice(&body).context(get::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; get::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod get { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: 
bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn create_or_update( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_name: &str, parameters: &VirtualMachine, subscription_id: &str, ) -> std::result::Result<create_or_update::Response, create_or_update::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}", &operation_config.base_path, subscription_id, resource_group_name, vm_name ); let mut req_builder = client.put(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(create_or_update::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(parameters); let req = req_builder.build().context(create_or_update::BuildRequestError)?; let rsp = client.execute(req).await.context(create_or_update::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: VirtualMachine = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; Ok(create_or_update::Response::Ok200(rsp_value)) } StatusCode::CREATED => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: VirtualMachine = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; Ok(create_or_update::Response::Created201(rsp_value)) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; create_or_update::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod create_or_update { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(VirtualMachine), Created201(VirtualMachine), } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn update( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_name: &str, parameters: &VirtualMachineUpdate, subscription_id: &str, ) -> std::result::Result<update::Response, update::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}", &operation_config.base_path, subscription_id, resource_group_name, vm_name ); let mut req_builder = client.patch(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(update::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = 
req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(parameters); let req = req_builder.build().context(update::BuildRequestError)?; let rsp = client.execute(req).await.context(update::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?; let rsp_value: VirtualMachine = serde_json::from_slice(&body).context(update::DeserializeError { body })?; Ok(update::Response::Ok200(rsp_value)) } StatusCode::CREATED => { let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?; let rsp_value: VirtualMachine = serde_json::from_slice(&body).context(update::DeserializeError { body })?; Ok(update::Response::Created201(rsp_value)) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?; update::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod update { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(VirtualMachine), Created201(VirtualMachine), } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn delete( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_name: &str, subscription_id: &str, ) -> std::result::Result<delete::Response, delete::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}", &operation_config.base_path, subscription_id, resource_group_name, vm_name ); let mut req_builder = client.delete(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(delete::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(delete::BuildRequestError)?; let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(delete::Response::Ok200), StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?; delete::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod delete { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn instance_view( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_name: 
&str, subscription_id: &str, ) -> std::result::Result<VirtualMachineInstanceView, instance_view::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/instanceView", &operation_config.base_path, subscription_id, resource_group_name, vm_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(instance_view::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(instance_view::BuildRequestError)?; let rsp = client.execute(req).await.context(instance_view::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(instance_view::ResponseBytesError)?; let rsp_value: VirtualMachineInstanceView = serde_json::from_slice(&body).context(instance_view::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(instance_view::ResponseBytesError)?; instance_view::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod instance_view { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn convert_to_managed_disks( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_name: &str, subscription_id: &str, ) -> std::result::Result<convert_to_managed_disks::Response, convert_to_managed_disks::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/convertToManagedDisks", &operation_config.base_path, subscription_id, resource_group_name, vm_name ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(convert_to_managed_disks::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.header(reqwest::header::CONTENT_LENGTH, 0); let req = req_builder.build().context(convert_to_managed_disks::BuildRequestError)?; let rsp = client.execute(req).await.context(convert_to_managed_disks::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(convert_to_managed_disks::Response::Ok200), StatusCode::ACCEPTED => Ok(convert_to_managed_disks::Response::Accepted202), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(convert_to_managed_disks::ResponseBytesError)?; convert_to_managed_disks::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod convert_to_managed_disks { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { 
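// Ok200: the conversion finished synchronously. Accepted202: the service accepted the request, typically as a long-running operation, and returns no body to deserialize.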
Ok200, Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn deallocate( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_name: &str, subscription_id: &str, ) -> std::result::Result<deallocate::Response, deallocate::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/deallocate", &operation_config.base_path, subscription_id, resource_group_name, vm_name ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(deallocate::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.header(reqwest::header::CONTENT_LENGTH, 0); let req = req_builder.build().context(deallocate::BuildRequestError)?; let rsp = client.execute(req).await.context(deallocate::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(deallocate::Response::Ok200), StatusCode::ACCEPTED => Ok(deallocate::Response::Accepted202), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(deallocate::ResponseBytesError)?; deallocate::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod deallocate { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn generalize( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_name: &str, subscription_id: &str, ) -> std::result::Result<(), generalize::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/generalize", &operation_config.base_path, subscription_id, resource_group_name, vm_name ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(generalize::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.header(reqwest::header::CONTENT_LENGTH, 0); let req = req_builder.build().context(generalize::BuildRequestError)?; let rsp = client.execute(req).await.context(generalize::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(()), status_code => { let body: bytes::Bytes = 
rsp.bytes().await.context(generalize::ResponseBytesError)?; generalize::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod generalize { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn list( operation_config: &crate::OperationConfig, resource_group_name: &str, subscription_id: &str, ) -> std::result::Result<VirtualMachineListResult, list::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines", &operation_config.base_path, subscription_id, resource_group_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list::BuildRequestError)?; let rsp = client.execute(req).await.context(list::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; let rsp_value: VirtualMachineListResult = serde_json::from_slice(&body).context(list::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; list::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod list { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn list_all( operation_config: &crate::OperationConfig, subscription_id: &str, ) -> std::result::Result<VirtualMachineListResult, list_all::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/providers/Microsoft.Compute/virtualMachines", &operation_config.base_path, subscription_id ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list_all::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list_all::BuildRequestError)?; let rsp = client.execute(req).await.context(list_all::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list_all::ResponseBytesError)?; let rsp_value: VirtualMachineListResult = 
serde_json::from_slice(&body).context(list_all::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list_all::ResponseBytesError)?; list_all::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod list_all { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn list_available_sizes( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_name: &str, subscription_id: &str, ) -> std::result::Result<VirtualMachineSizeListResult, list_available_sizes::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/vmSizes", &operation_config.base_path, subscription_id, resource_group_name, vm_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list_available_sizes::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list_available_sizes::BuildRequestError)?; let rsp = client.execute(req).await.context(list_available_sizes::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list_available_sizes::ResponseBytesError)?; let rsp_value: VirtualMachineSizeListResult = serde_json::from_slice(&body).context(list_available_sizes::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list_available_sizes::ResponseBytesError)?; list_available_sizes::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod list_available_sizes { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn power_off( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_name: &str, subscription_id: &str, ) -> std::result::Result<power_off::Response, power_off::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/powerOff", &operation_config.base_path, subscription_id, resource_group_name, vm_name ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(power_off::GetTokenError)?; req_builder = 
req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.header(reqwest::header::CONTENT_LENGTH, 0); let req = req_builder.build().context(power_off::BuildRequestError)?; let rsp = client.execute(req).await.context(power_off::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(power_off::Response::Ok200), StatusCode::ACCEPTED => Ok(power_off::Response::Accepted202), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(power_off::ResponseBytesError)?; power_off::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod power_off { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn restart( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_name: &str, subscription_id: &str, ) -> std::result::Result<restart::Response, restart::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/restart", &operation_config.base_path, subscription_id, resource_group_name, vm_name ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(restart::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.header(reqwest::header::CONTENT_LENGTH, 0); let req = req_builder.build().context(restart::BuildRequestError)?; let rsp = client.execute(req).await.context(restart::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(restart::Response::Ok200), StatusCode::ACCEPTED => Ok(restart::Response::Accepted202), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(restart::ResponseBytesError)?; restart::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod restart { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn start( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_name: &str, subscription_id: &str, ) -> std::result::Result<start::Response, start::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/start", &operation_config.base_path, subscription_id, resource_group_name, 
vm_name ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(start::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.header(reqwest::header::CONTENT_LENGTH, 0); let req = req_builder.build().context(start::BuildRequestError)?; let rsp = client.execute(req).await.context(start::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(start::Response::Ok200), StatusCode::ACCEPTED => Ok(start::Response::Accepted202), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(start::ResponseBytesError)?; start::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod start { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn redeploy( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_name: &str, subscription_id: &str, ) -> std::result::Result<redeploy::Response, redeploy::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/redeploy", &operation_config.base_path, subscription_id, resource_group_name, vm_name ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(redeploy::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.header(reqwest::header::CONTENT_LENGTH, 0); let req = req_builder.build().context(redeploy::BuildRequestError)?; let rsp = client.execute(req).await.context(redeploy::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(redeploy::Response::Ok200), StatusCode::ACCEPTED => Ok(redeploy::Response::Accepted202), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(redeploy::ResponseBytesError)?; redeploy::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod redeploy { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn perform_maintenance( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_name: &str, subscription_id: &str, ) -> 
std::result::Result<perform_maintenance::Response, perform_maintenance::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/performMaintenance", &operation_config.base_path, subscription_id, resource_group_name, vm_name ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(perform_maintenance::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.header(reqwest::header::CONTENT_LENGTH, 0); let req = req_builder.build().context(perform_maintenance::BuildRequestError)?; let rsp = client.execute(req).await.context(perform_maintenance::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(perform_maintenance::Response::Ok200), StatusCode::ACCEPTED => Ok(perform_maintenance::Response::Accepted202), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(perform_maintenance::ResponseBytesError)?; perform_maintenance::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod perform_maintenance { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn run_command( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_name: &str, parameters: &RunCommandInput, subscription_id: &str, ) -> std::result::Result<run_command::Response, run_command::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachines/{}/runCommand", &operation_config.base_path, subscription_id, resource_group_name, vm_name ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(run_command::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(parameters); let req = req_builder.build().context(run_command::BuildRequestError)?; let rsp = client.execute(req).await.context(run_command::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(run_command::ResponseBytesError)?; let rsp_value: RunCommandResult = serde_json::from_slice(&body).context(run_command::DeserializeError { body })?; Ok(run_command::Response::Ok200(rsp_value)) } StatusCode::ACCEPTED => Ok(run_command::Response::Accepted202), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(run_command::ResponseBytesError)?; run_command::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod run_command { use crate::{models, 
models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(RunCommandResult), Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } } pub mod virtual_machine_sizes { use crate::models::*; use reqwest::StatusCode; use snafu::{ResultExt, Snafu}; pub async fn list( operation_config: &crate::OperationConfig, location: &str, subscription_id: &str, ) -> std::result::Result<VirtualMachineSizeListResult, list::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/vmSizes", &operation_config.base_path, subscription_id, location ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list::BuildRequestError)?; let rsp = client.execute(req).await.context(list::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; let rsp_value: VirtualMachineSizeListResult = serde_json::from_slice(&body).context(list::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; list::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod list { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } } pub mod images { use crate::models::*; use reqwest::StatusCode; use snafu::{ResultExt, Snafu}; pub async fn get( operation_config: &crate::OperationConfig, resource_group_name: &str, image_name: &str, expand: Option<&str>, subscription_id: &str, ) -> std::result::Result<Image, get::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/images/{}", &operation_config.base_path, subscription_id, resource_group_name, image_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(get::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); if let Some(expand) = expand { req_builder = req_builder.query(&[("$expand", expand)]); } let req = req_builder.build().context(get::BuildRequestError)?; let 
rsp = client.execute(req).await.context(get::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; let rsp_value: Image = serde_json::from_slice(&body).context(get::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; get::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod get { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn create_or_update( operation_config: &crate::OperationConfig, resource_group_name: &str, image_name: &str, parameters: &Image, subscription_id: &str, ) -> std::result::Result<create_or_update::Response, create_or_update::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/images/{}", &operation_config.base_path, subscription_id, resource_group_name, image_name ); let mut req_builder = client.put(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(create_or_update::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(parameters); let req = req_builder.build().context(create_or_update::BuildRequestError)?; let rsp = client.execute(req).await.context(create_or_update::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: Image = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; Ok(create_or_update::Response::Ok200(rsp_value)) } StatusCode::CREATED => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: Image = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; Ok(create_or_update::Response::Created201(rsp_value)) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; create_or_update::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod create_or_update { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(Image), Created201(Image), } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn update( operation_config: &crate::OperationConfig, resource_group_name: &str, image_name: &str, parameters: &ImageUpdate, subscription_id: &str, ) -> 
std::result::Result<update::Response, update::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/images/{}", &operation_config.base_path, subscription_id, resource_group_name, image_name ); let mut req_builder = client.patch(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(update::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(parameters); let req = req_builder.build().context(update::BuildRequestError)?; let rsp = client.execute(req).await.context(update::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?; let rsp_value: Image = serde_json::from_slice(&body).context(update::DeserializeError { body })?; Ok(update::Response::Ok200(rsp_value)) } StatusCode::CREATED => { let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?; let rsp_value: Image = serde_json::from_slice(&body).context(update::DeserializeError { body })?; Ok(update::Response::Created201(rsp_value)) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?; update::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod update { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(Image), Created201(Image), } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn delete( operation_config: &crate::OperationConfig, resource_group_name: &str, image_name: &str, subscription_id: &str, ) -> std::result::Result<delete::Response, delete::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/images/{}", &operation_config.base_path, subscription_id, resource_group_name, image_name ); let mut req_builder = client.delete(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(delete::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(delete::BuildRequestError)?; let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(delete::Response::Ok200), StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?; delete::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod delete { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub 
enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn list_by_resource_group( operation_config: &crate::OperationConfig, resource_group_name: &str, subscription_id: &str, ) -> std::result::Result<ImageListResult, list_by_resource_group::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/images", &operation_config.base_path, subscription_id, resource_group_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list_by_resource_group::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list_by_resource_group::BuildRequestError)?; let rsp = client.execute(req).await.context(list_by_resource_group::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list_by_resource_group::ResponseBytesError)?; let rsp_value: ImageListResult = serde_json::from_slice(&body).context(list_by_resource_group::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list_by_resource_group::ResponseBytesError)?; list_by_resource_group::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod list_by_resource_group { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn list( operation_config: &crate::OperationConfig, subscription_id: &str, ) -> std::result::Result<ImageListResult, list::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/providers/Microsoft.Compute/images", &operation_config.base_path, subscription_id ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list::BuildRequestError)?; let rsp = client.execute(req).await.context(list::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; let rsp_value: ImageListResult = serde_json::from_slice(&body).context(list::DeserializeError { body })?; 
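// Note: deserialization failures above carry the raw response body alongside the serde error, so callers can inspect malformed payloads.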
Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; list::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod list { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } } pub mod virtual_machine_scale_sets { use crate::models::*; use reqwest::StatusCode; use snafu::{ResultExt, Snafu}; pub async fn get( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_scale_set_name: &str, subscription_id: &str, ) -> std::result::Result<VirtualMachineScaleSet, get::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}", &operation_config.base_path, subscription_id, resource_group_name, vm_scale_set_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(get::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(get::BuildRequestError)?; let rsp = client.execute(req).await.context(get::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; let rsp_value: VirtualMachineScaleSet = serde_json::from_slice(&body).context(get::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; get::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod get { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn create_or_update( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_scale_set_name: &str, parameters: &VirtualMachineScaleSet, subscription_id: &str, ) -> std::result::Result<create_or_update::Response, create_or_update::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}", &operation_config.base_path, subscription_id, resource_group_name, vm_scale_set_name ); let mut req_builder = client.put(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(create_or_update::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = 
req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(parameters); let req = req_builder.build().context(create_or_update::BuildRequestError)?; let rsp = client.execute(req).await.context(create_or_update::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: VirtualMachineScaleSet = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; Ok(create_or_update::Response::Ok200(rsp_value)) } StatusCode::CREATED => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: VirtualMachineScaleSet = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; Ok(create_or_update::Response::Created201(rsp_value)) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; create_or_update::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod create_or_update { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(VirtualMachineScaleSet), Created201(VirtualMachineScaleSet), } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn update( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_scale_set_name: &str, parameters: &VirtualMachineScaleSetUpdate, subscription_id: &str, ) -> std::result::Result<VirtualMachineScaleSet, update::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}", &operation_config.base_path, subscription_id, resource_group_name, vm_scale_set_name ); let mut req_builder = client.patch(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(update::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(parameters); let req = req_builder.build().context(update::BuildRequestError)?; let rsp = client.execute(req).await.context(update::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?; let rsp_value: VirtualMachineScaleSet = serde_json::from_slice(&body).context(update::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?; update::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod update { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: 
reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn delete( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_scale_set_name: &str, subscription_id: &str, ) -> std::result::Result<delete::Response, delete::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}", &operation_config.base_path, subscription_id, resource_group_name, vm_scale_set_name ); let mut req_builder = client.delete(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(delete::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(delete::BuildRequestError)?; let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(delete::Response::Ok200), StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?; delete::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod delete { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn deallocate( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_scale_set_name: &str, vm_instance_i_ds: Option<&VirtualMachineScaleSetVmInstanceIDs>, subscription_id: &str, ) -> std::result::Result<deallocate::Response, deallocate::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/deallocate", &operation_config.base_path, subscription_id, resource_group_name, vm_scale_set_name ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(deallocate::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); if let Some(vm_instance_i_ds) = vm_instance_i_ds { req_builder = req_builder.json(vm_instance_i_ds); } let req = req_builder.build().context(deallocate::BuildRequestError)?; let rsp = client.execute(req).await.context(deallocate::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(deallocate::Response::Ok200), StatusCode::ACCEPTED => Ok(deallocate::Response::Accepted202), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(deallocate::ResponseBytesError)?; deallocate::UnexpectedResponse { status_code, 
body: body }.fail() } } } pub mod deallocate { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn delete_instances( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_scale_set_name: &str, vm_instance_i_ds: &VirtualMachineScaleSetVmInstanceRequiredIDs, subscription_id: &str, ) -> std::result::Result<delete_instances::Response, delete_instances::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/delete", &operation_config.base_path, subscription_id, resource_group_name, vm_scale_set_name ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential
{ let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(delete_instances::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); }
req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(vm_instance_i_ds); let req = req_builder.build().context(delete_instances::BuildRequestError)?; let rsp = client.execute(req).await.context(delete_instances::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(delete_instances::Response::Ok200), StatusCode::ACCEPTED => Ok(delete_instances::Response::Accepted202), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(delete_instances::ResponseBytesError)?; delete_instances::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod delete_instances { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn get_instance_view( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_scale_set_name: &str, subscription_id: &str, ) -> std::result::Result<VirtualMachineScaleSetInstanceView, get_instance_view::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/instanceView", &operation_config.base_path, subscription_id, resource_group_name, vm_scale_set_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(get_instance_view::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(get_instance_view::BuildRequestError)?; let rsp = client.execute(req).await.context(get_instance_view::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(get_instance_view::ResponseBytesError)?; let rsp_value: VirtualMachineScaleSetInstanceView = serde_json::from_slice(&body).context(get_instance_view::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(get_instance_view::ResponseBytesError)?; get_instance_view::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod get_instance_view { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn list( operation_config: &crate::OperationConfig, resource_group_name: &str, subscription_id: &str, ) -> std::result::Result<VirtualMachineScaleSetListResult, list::Error> { let client = &operation_config.client; let uri_str = &format!( 
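// ARM endpoint: enumerates the scale sets within a single resource group (list_all below covers the whole subscription).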
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets", &operation_config.base_path, subscription_id, resource_group_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list::BuildRequestError)?; let rsp = client.execute(req).await.context(list::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; let rsp_value: VirtualMachineScaleSetListResult = serde_json::from_slice(&body).context(list::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; list::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod list { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn list_all( operation_config: &crate::OperationConfig, subscription_id: &str, ) -> std::result::Result<VirtualMachineScaleSetListWithLinkResult, list_all::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/providers/Microsoft.Compute/virtualMachineScaleSets", &operation_config.base_path, subscription_id ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list_all::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list_all::BuildRequestError)?; let rsp = client.execute(req).await.context(list_all::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list_all::ResponseBytesError)?; let rsp_value: VirtualMachineScaleSetListWithLinkResult = serde_json::from_slice(&body).context(list_all::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list_all::ResponseBytesError)?; list_all::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod list_all { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn list_skus( operation_config: 
&crate::OperationConfig, resource_group_name: &str, vm_scale_set_name: &str, subscription_id: &str, ) -> std::result::Result<VirtualMachineScaleSetListSkusResult, list_skus::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/skus", &operation_config.base_path, subscription_id, resource_group_name, vm_scale_set_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list_skus::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list_skus::BuildRequestError)?; let rsp = client.execute(req).await.context(list_skus::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list_skus::ResponseBytesError)?; let rsp_value: VirtualMachineScaleSetListSkusResult = serde_json::from_slice(&body).context(list_skus::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list_skus::ResponseBytesError)?; list_skus::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod list_skus { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn get_os_upgrade_history( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_scale_set_name: &str, subscription_id: &str, ) -> std::result::Result<VirtualMachineScaleSetListOsUpgradeHistory, get_os_upgrade_history::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/osUpgradeHistory", &operation_config.base_path, subscription_id, resource_group_name, vm_scale_set_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(get_os_upgrade_history::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(get_os_upgrade_history::BuildRequestError)?; let rsp = client.execute(req).await.context(get_os_upgrade_history::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(get_os_upgrade_history::ResponseBytesError)?; let rsp_value: VirtualMachineScaleSetListOsUpgradeHistory = serde_json::from_slice(&body).context(get_os_upgrade_history::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(get_os_upgrade_history::ResponseBytesError)?; get_os_upgrade_history::UnexpectedResponse { status_code, body: body }.fail() } 
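// Any status other than 200 OK is surfaced as UnexpectedResponse, preserving both the status code and the raw body.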
} } pub mod get_os_upgrade_history { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn power_off( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_scale_set_name: &str, vm_instance_i_ds: Option<&VirtualMachineScaleSetVmInstanceIDs>, subscription_id: &str, ) -> std::result::Result<power_off::Response, power_off::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/poweroff", &operation_config.base_path, subscription_id, resource_group_name, vm_scale_set_name ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(power_off::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); if let Some(vm_instance_i_ds) = vm_instance_i_ds { req_builder = req_builder.json(vm_instance_i_ds); } let req = req_builder.build().context(power_off::BuildRequestError)?; let rsp = client.execute(req).await.context(power_off::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(power_off::Response::Ok200), StatusCode::ACCEPTED => Ok(power_off::Response::Accepted202), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(power_off::ResponseBytesError)?; power_off::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod power_off { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn restart( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_scale_set_name: &str, vm_instance_i_ds: Option<&VirtualMachineScaleSetVmInstanceIDs>, subscription_id: &str, ) -> std::result::Result<restart::Response, restart::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/restart", &operation_config.base_path, subscription_id, resource_group_name, vm_scale_set_name ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(restart::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); if let Some(vm_instance_i_ds) = 
vm_instance_i_ds { req_builder = req_builder.json(vm_instance_i_ds); } let req = req_builder.build().context(restart::BuildRequestError)?; let rsp = client.execute(req).await.context(restart::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(restart::Response::Ok200), StatusCode::ACCEPTED => Ok(restart::Response::Accepted202), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(restart::ResponseBytesError)?; restart::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod restart { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn start( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_scale_set_name: &str, vm_instance_i_ds: Option<&VirtualMachineScaleSetVmInstanceIDs>, subscription_id: &str, ) -> std::result::Result<start::Response, start::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/start", &operation_config.base_path, subscription_id, resource_group_name, vm_scale_set_name ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(start::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); if let Some(vm_instance_i_ds) = vm_instance_i_ds { req_builder = req_builder.json(vm_instance_i_ds); } let req = req_builder.build().context(start::BuildRequestError)?; let rsp = client.execute(req).await.context(start::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(start::Response::Ok200), StatusCode::ACCEPTED => Ok(start::Response::Accepted202), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(start::ResponseBytesError)?; start::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod start { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn redeploy( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_scale_set_name: &str, vm_instance_i_ds: Option<&VirtualMachineScaleSetVmInstanceIDs>, subscription_id: &str, ) -> std::result::Result<redeploy::Response, redeploy::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/redeploy", &operation_config.base_path, 
subscription_id, resource_group_name, vm_scale_set_name ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(redeploy::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); if let Some(vm_instance_i_ds) = vm_instance_i_ds { req_builder = req_builder.json(vm_instance_i_ds); } let req = req_builder.build().context(redeploy::BuildRequestError)?; let rsp = client.execute(req).await.context(redeploy::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(redeploy::Response::Ok200), StatusCode::ACCEPTED => Ok(redeploy::Response::Accepted202), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(redeploy::ResponseBytesError)?; redeploy::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod redeploy { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn perform_maintenance( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_scale_set_name: &str, vm_instance_i_ds: Option<&VirtualMachineScaleSetVmInstanceIDs>, subscription_id: &str, ) -> std::result::Result<perform_maintenance::Response, perform_maintenance::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/performMaintenance", &operation_config.base_path, subscription_id, resource_group_name, vm_scale_set_name ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(perform_maintenance::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); if let Some(vm_instance_i_ds) = vm_instance_i_ds { req_builder = req_builder.json(vm_instance_i_ds); } let req = req_builder.build().context(perform_maintenance::BuildRequestError)?; let rsp = client.execute(req).await.context(perform_maintenance::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(perform_maintenance::Response::Ok200), StatusCode::ACCEPTED => Ok(perform_maintenance::Response::Accepted202), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(perform_maintenance::ResponseBytesError)?; perform_maintenance::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod perform_maintenance { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError 
{ source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn update_instances( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_scale_set_name: &str, vm_instance_i_ds: &VirtualMachineScaleSetVmInstanceRequiredIDs, subscription_id: &str, ) -> std::result::Result<update_instances::Response, update_instances::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/manualupgrade", &operation_config.base_path, subscription_id, resource_group_name, vm_scale_set_name ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(update_instances::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(vm_instance_i_ds); let req = req_builder.build().context(update_instances::BuildRequestError)?; let rsp = client.execute(req).await.context(update_instances::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(update_instances::Response::Ok200), StatusCode::ACCEPTED => Ok(update_instances::Response::Accepted202), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(update_instances::ResponseBytesError)?; update_instances::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod update_instances { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn reimage( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_scale_set_name: &str, vm_instance_i_ds: Option<&VirtualMachineScaleSetVmInstanceIDs>, subscription_id: &str, ) -> std::result::Result<reimage::Response, reimage::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/reimage", &operation_config.base_path, subscription_id, resource_group_name, vm_scale_set_name ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(reimage::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); if let Some(vm_instance_i_ds) = vm_instance_i_ds { req_builder = req_builder.json(vm_instance_i_ds); } let req = req_builder.build().context(reimage::BuildRequestError)?; let rsp = client.execute(req).await.context(reimage::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(reimage::Response::Ok200), StatusCode::ACCEPTED 
=> Ok(reimage::Response::Accepted202), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(reimage::ResponseBytesError)?; reimage::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod reimage { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn reimage_all( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_scale_set_name: &str, vm_instance_i_ds: Option<&VirtualMachineScaleSetVmInstanceIDs>, subscription_id: &str, ) -> std::result::Result<reimage_all::Response, reimage_all::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/reimageall", &operation_config.base_path, subscription_id, resource_group_name, vm_scale_set_name ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(reimage_all::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); if let Some(vm_instance_i_ds) = vm_instance_i_ds { req_builder = req_builder.json(vm_instance_i_ds); } let req = req_builder.build().context(reimage_all::BuildRequestError)?; let rsp = client.execute(req).await.context(reimage_all::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(reimage_all::Response::Ok200), StatusCode::ACCEPTED => Ok(reimage_all::Response::Accepted202), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(reimage_all::ResponseBytesError)?; reimage_all::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod reimage_all { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn force_recovery_service_fabric_platform_update_domain_walk( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_scale_set_name: &str, subscription_id: &str, platform_update_domain: i64, ) -> std::result::Result<RecoveryWalkResponse, force_recovery_service_fabric_platform_update_domain_walk::Error> { let client = &operation_config.client; let uri_str = & format ! ("{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/forceRecoveryServiceFabricPlatformUpdateDomainWalk" , & operation_config . 
base_path , subscription_id , resource_group_name , vm_scale_set_name) ; let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(force_recovery_service_fabric_platform_update_domain_walk::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.query(&[("platformUpdateDomain", platform_update_domain)]); req_builder = req_builder.header(reqwest::header::CONTENT_LENGTH, 0); let req = req_builder .build() .context(force_recovery_service_fabric_platform_update_domain_walk::BuildRequestError)?; let rsp = client .execute(req) .await .context(force_recovery_service_fabric_platform_update_domain_walk::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp .bytes() .await .context(force_recovery_service_fabric_platform_update_domain_walk::ResponseBytesError)?; let rsp_value: RecoveryWalkResponse = serde_json::from_slice(&body) .context(force_recovery_service_fabric_platform_update_domain_walk::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp .bytes() .await .context(force_recovery_service_fabric_platform_update_domain_walk::ResponseBytesError)?; force_recovery_service_fabric_platform_update_domain_walk::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod force_recovery_service_fabric_platform_update_domain_walk { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } } pub mod virtual_machine_scale_set_extensions { use crate::models::*; use reqwest::StatusCode; use snafu::{ResultExt, Snafu}; pub async fn get( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_scale_set_name: &str, vmss_extension_name: &str, expand: Option<&str>, subscription_id: &str, ) -> std::result::Result<VirtualMachineScaleSetExtension, get::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/extensions/{}", &operation_config.base_path, subscription_id, resource_group_name, vm_scale_set_name, vmss_extension_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(get::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); if let Some(expand) = expand { req_builder = req_builder.query(&[("$expand", expand)]); } let req = req_builder.build().context(get::BuildRequestError)?; let rsp = client.execute(req).await.context(get::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; let rsp_value: 
VirtualMachineScaleSetExtension = serde_json::from_slice(&body).context(get::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; get::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod get { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn create_or_update( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_scale_set_name: &str, vmss_extension_name: &str, extension_parameters: &VirtualMachineScaleSetExtension, subscription_id: &str, ) -> std::result::Result<create_or_update::Response, create_or_update::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/extensions/{}", &operation_config.base_path, subscription_id, resource_group_name, vm_scale_set_name, vmss_extension_name ); let mut req_builder = client.put(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(create_or_update::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(extension_parameters); let req = req_builder.build().context(create_or_update::BuildRequestError)?; let rsp = client.execute(req).await.context(create_or_update::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: VirtualMachineScaleSetExtension = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; Ok(create_or_update::Response::Ok200(rsp_value)) } StatusCode::CREATED => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: VirtualMachineScaleSetExtension = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; Ok(create_or_update::Response::Created201(rsp_value)) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; create_or_update::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod create_or_update { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(VirtualMachineScaleSetExtension), Created201(VirtualMachineScaleSetExtension), } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn delete( operation_config: &crate::OperationConfig, resource_group_name: &str, 
vm_scale_set_name: &str, vmss_extension_name: &str, subscription_id: &str, ) -> std::result::Result<delete::Response, delete::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/extensions/{}", &operation_config.base_path, subscription_id, resource_group_name, vm_scale_set_name, vmss_extension_name ); let mut req_builder = client.delete(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(delete::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(delete::BuildRequestError)?; let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(delete::Response::Ok200), StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?; delete::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod delete { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn list( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_scale_set_name: &str, subscription_id: &str, ) -> std::result::Result<VirtualMachineScaleSetExtensionListResult, list::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/extensions", &operation_config.base_path, subscription_id, resource_group_name, vm_scale_set_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list::BuildRequestError)?; let rsp = client.execute(req).await.context(list::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; let rsp_value: VirtualMachineScaleSetExtensionListResult = serde_json::from_slice(&body).context(list::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; list::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod list { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, 
body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } } pub mod virtual_machine_scale_set_rolling_upgrades { use crate::models::*; use reqwest::StatusCode; use snafu::{ResultExt, Snafu}; pub async fn cancel( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_scale_set_name: &str, subscription_id: &str, ) -> std::result::Result<cancel::Response, cancel::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/rollingUpgrades/cancel", &operation_config.base_path, subscription_id, resource_group_name, vm_scale_set_name ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(cancel::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.header(reqwest::header::CONTENT_LENGTH, 0); let req = req_builder.build().context(cancel::BuildRequestError)?; let rsp = client.execute(req).await.context(cancel::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(cancel::Response::Ok200), StatusCode::ACCEPTED => Ok(cancel::Response::Accepted202), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(cancel::ResponseBytesError)?; cancel::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod cancel { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn start_os_upgrade( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_scale_set_name: &str, subscription_id: &str, ) -> std::result::Result<start_os_upgrade::Response, start_os_upgrade::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/osRollingUpgrade", &operation_config.base_path, subscription_id, resource_group_name, vm_scale_set_name ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(start_os_upgrade::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.header(reqwest::header::CONTENT_LENGTH, 0); let req = req_builder.build().context(start_os_upgrade::BuildRequestError)?; let rsp = client.execute(req).await.context(start_os_upgrade::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => 
Ok(start_os_upgrade::Response::Ok200), StatusCode::ACCEPTED => Ok(start_os_upgrade::Response::Accepted202), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(start_os_upgrade::ResponseBytesError)?; start_os_upgrade::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod start_os_upgrade { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn get_latest( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_scale_set_name: &str, subscription_id: &str, ) -> std::result::Result<RollingUpgradeStatusInfo, get_latest::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/rollingUpgrades/latest", &operation_config.base_path, subscription_id, resource_group_name, vm_scale_set_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(get_latest::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(get_latest::BuildRequestError)?; let rsp = client.execute(req).await.context(get_latest::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(get_latest::ResponseBytesError)?; let rsp_value: RollingUpgradeStatusInfo = serde_json::from_slice(&body).context(get_latest::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(get_latest::ResponseBytesError)?; get_latest::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod get_latest { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } } pub mod virtual_machine_scale_set_v_ms { use crate::models::*; use reqwest::StatusCode; use snafu::{ResultExt, Snafu}; pub async fn reimage( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_scale_set_name: &str, instance_id: &str, subscription_id: &str, ) -> std::result::Result<reimage::Response, reimage::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}/reimage", &operation_config.base_path, subscription_id, resource_group_name, vm_scale_set_name, instance_id ); let mut req_builder = client.post(uri_str); if let 
Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(reimage::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.header(reqwest::header::CONTENT_LENGTH, 0); let req = req_builder.build().context(reimage::BuildRequestError)?; let rsp = client.execute(req).await.context(reimage::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(reimage::Response::Ok200), StatusCode::ACCEPTED => Ok(reimage::Response::Accepted202), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(reimage::ResponseBytesError)?; reimage::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod reimage { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn reimage_all( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_scale_set_name: &str, instance_id: &str, subscription_id: &str, ) -> std::result::Result<reimage_all::Response, reimage_all::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}/reimageall", &operation_config.base_path, subscription_id, resource_group_name, vm_scale_set_name, instance_id ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(reimage_all::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.header(reqwest::header::CONTENT_LENGTH, 0); let req = req_builder.build().context(reimage_all::BuildRequestError)?; let rsp = client.execute(req).await.context(reimage_all::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(reimage_all::Response::Ok200), StatusCode::ACCEPTED => Ok(reimage_all::Response::Accepted202), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(reimage_all::ResponseBytesError)?; reimage_all::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod reimage_all { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn deallocate( operation_config: &crate::OperationConfig, resource_group_name: 
&str, vm_scale_set_name: &str, instance_id: &str, subscription_id: &str, ) -> std::result::Result<deallocate::Response, deallocate::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}/deallocate", &operation_config.base_path, subscription_id, resource_group_name, vm_scale_set_name, instance_id ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(deallocate::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.header(reqwest::header::CONTENT_LENGTH, 0); let req = req_builder.build().context(deallocate::BuildRequestError)?; let rsp = client.execute(req).await.context(deallocate::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(deallocate::Response::Ok200), StatusCode::ACCEPTED => Ok(deallocate::Response::Accepted202), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(deallocate::ResponseBytesError)?; deallocate::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod deallocate { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn get( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_scale_set_name: &str, instance_id: &str, subscription_id: &str, ) -> std::result::Result<VirtualMachineScaleSetVm, get::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}", &operation_config.base_path, subscription_id, resource_group_name, vm_scale_set_name, instance_id ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(get::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(get::BuildRequestError)?; let rsp = client.execute(req).await.context(get::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; let rsp_value: VirtualMachineScaleSetVm = serde_json::from_slice(&body).context(get::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; get::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod get { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { 
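// One variant per failure point: unexpected HTTP status, request build,
// request execution, body read, JSON deserialization, and token acquisition.
// Every operation module in this file repeats this shape.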
UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn update( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_scale_set_name: &str, instance_id: &str, parameters: &VirtualMachineScaleSetVm, subscription_id: &str, ) -> std::result::Result<update::Response, update::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}", &operation_config.base_path, subscription_id, resource_group_name, vm_scale_set_name, instance_id ); let mut req_builder = client.put(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(update::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(parameters); let req = req_builder.build().context(update::BuildRequestError)?; let rsp = client.execute(req).await.context(update::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?; let rsp_value: VirtualMachineScaleSetVm = serde_json::from_slice(&body).context(update::DeserializeError { body })?; Ok(update::Response::Ok200(rsp_value)) } StatusCode::ACCEPTED => { let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?; let rsp_value: VirtualMachineScaleSetVm = serde_json::from_slice(&body).context(update::DeserializeError { body })?; Ok(update::Response::Accepted202(rsp_value)) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?; update::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod update { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(VirtualMachineScaleSetVm), Accepted202(VirtualMachineScaleSetVm), } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn delete( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_scale_set_name: &str, instance_id: &str, subscription_id: &str, ) -> std::result::Result<delete::Response, delete::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}", &operation_config.base_path, subscription_id, resource_group_name, vm_scale_set_name, instance_id ); let mut req_builder = client.delete(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(delete::GetTokenError)?; 
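// `bearer_auth` sets the standard `Authorization: Bearer <token>` header.
// When no credential is configured the request goes out unauthenticated,
// which is presumably only useful against a local test endpoint.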
req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(delete::BuildRequestError)?; let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(delete::Response::Ok200), StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?; delete::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod delete { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn get_instance_view( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_scale_set_name: &str, instance_id: &str, subscription_id: &str, ) -> std::result::Result<VirtualMachineScaleSetVmInstanceView, get_instance_view::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}/instanceView", &operation_config.base_path, subscription_id, resource_group_name, vm_scale_set_name, instance_id ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(get_instance_view::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(get_instance_view::BuildRequestError)?; let rsp = client.execute(req).await.context(get_instance_view::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(get_instance_view::ResponseBytesError)?; let rsp_value: VirtualMachineScaleSetVmInstanceView = serde_json::from_slice(&body).context(get_instance_view::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(get_instance_view::ResponseBytesError)?; get_instance_view::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod get_instance_view { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn list( operation_config: &crate::OperationConfig, resource_group_name: &str, virtual_machine_scale_set_name: &str, filter: Option<&str>, select: Option<&str>, expand: Option<&str>, 
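// The three `Option`s above map to the OData `$filter`, `$select` and
// `$expand` query parameters applied below; `None` simply omits the
// corresponding parameter from the request.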
subscription_id: &str, ) -> std::result::Result<VirtualMachineScaleSetVmListResult, list::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualMachines", &operation_config.base_path, subscription_id, resource_group_name, virtual_machine_scale_set_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); if let Some(filter) = filter { req_builder = req_builder.query(&[("$filter", filter)]); } if let Some(select) = select { req_builder = req_builder.query(&[("$select", select)]); } if let Some(expand) = expand { req_builder = req_builder.query(&[("$expand", expand)]); } let req = req_builder.build().context(list::BuildRequestError)?; let rsp = client.execute(req).await.context(list::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; let rsp_value: VirtualMachineScaleSetVmListResult = serde_json::from_slice(&body).context(list::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; list::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod list { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn power_off( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_scale_set_name: &str, instance_id: &str, subscription_id: &str, ) -> std::result::Result<power_off::Response, power_off::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}/poweroff", &operation_config.base_path, subscription_id, resource_group_name, vm_scale_set_name, instance_id ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(power_off::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.header(reqwest::header::CONTENT_LENGTH, 0); let req = req_builder.build().context(power_off::BuildRequestError)?; let rsp = client.execute(req).await.context(power_off::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(power_off::Response::Ok200), StatusCode::ACCEPTED => Ok(power_off::Response::Accepted202), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(power_off::ResponseBytesError)?; power_off::UnexpectedResponse { status_code, body: body }.fail() } } } 
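// A minimal call sketch for the instance-level operations in this module;
// `config` is assumed to be a `crate::OperationConfig` built by the caller,
// and the resource names are hypothetical:
//
//     let rsp = power_off(&config, "my-rg", "my-vmss", "0", "<subscription-id>").await?;
//     match rsp {
//         power_off::Response::Ok200 => { /* stop completed */ }
//         power_off::Response::Accepted202 => { /* stop still in progress */ }
//     }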
pub mod power_off { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn restart( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_scale_set_name: &str, instance_id: &str, subscription_id: &str, ) -> std::result::Result<restart::Response, restart::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}/restart", &operation_config.base_path, subscription_id, resource_group_name, vm_scale_set_name, instance_id ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(restart::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.header(reqwest::header::CONTENT_LENGTH, 0); let req = req_builder.build().context(restart::BuildRequestError)?; let rsp = client.execute(req).await.context(restart::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(restart::Response::Ok200), StatusCode::ACCEPTED => Ok(restart::Response::Accepted202), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(restart::ResponseBytesError)?; restart::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod restart { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn start( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_scale_set_name: &str, instance_id: &str, subscription_id: &str, ) -> std::result::Result<start::Response, start::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}/start", &operation_config.base_path, subscription_id, resource_group_name, vm_scale_set_name, instance_id ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(start::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.header(reqwest::header::CONTENT_LENGTH, 0); let req = 
req_builder.build().context(start::BuildRequestError)?; let rsp = client.execute(req).await.context(start::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(start::Response::Ok200), StatusCode::ACCEPTED => Ok(start::Response::Accepted202), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(start::ResponseBytesError)?; start::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod start { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn redeploy( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_scale_set_name: &str, instance_id: &str, subscription_id: &str, ) -> std::result::Result<redeploy::Response, redeploy::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}/redeploy", &operation_config.base_path, subscription_id, resource_group_name, vm_scale_set_name, instance_id ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(redeploy::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.header(reqwest::header::CONTENT_LENGTH, 0); let req = req_builder.build().context(redeploy::BuildRequestError)?; let rsp = client.execute(req).await.context(redeploy::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(redeploy::Response::Ok200), StatusCode::ACCEPTED => Ok(redeploy::Response::Accepted202), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(redeploy::ResponseBytesError)?; redeploy::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod redeploy { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn perform_maintenance( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_scale_set_name: &str, instance_id: &str, subscription_id: &str, ) -> std::result::Result<perform_maintenance::Response, perform_maintenance::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}/performMaintenance", &operation_config.base_path, subscription_id, resource_group_name, vm_scale_set_name, instance_id );
let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(perform_maintenance::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.header(reqwest::header::CONTENT_LENGTH, 0); let req = req_builder.build().context(perform_maintenance::BuildRequestError)?; let rsp = client.execute(req).await.context(perform_maintenance::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(perform_maintenance::Response::Ok200), StatusCode::ACCEPTED => Ok(perform_maintenance::Response::Accepted202), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(perform_maintenance::ResponseBytesError)?; perform_maintenance::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod perform_maintenance { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn run_command( operation_config: &crate::OperationConfig, resource_group_name: &str, vm_scale_set_name: &str, instance_id: &str, parameters: &RunCommandInput, subscription_id: &str, ) -> std::result::Result<run_command::Response, run_command::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/virtualMachineScaleSets/{}/virtualmachines/{}/runCommand", &operation_config.base_path, subscription_id, resource_group_name, vm_scale_set_name, instance_id ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(run_command::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(parameters); let req = req_builder.build().context(run_command::BuildRequestError)?; let rsp = client.execute(req).await.context(run_command::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(run_command::ResponseBytesError)?; let rsp_value: RunCommandResult = serde_json::from_slice(&body).context(run_command::DeserializeError { body })?; Ok(run_command::Response::Ok200(rsp_value)) } StatusCode::ACCEPTED => Ok(run_command::Response::Accepted202), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(run_command::ResponseBytesError)?; run_command::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod run_command { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(RunCommandResult), Accepted202, } #[derive(Debug, Snafu)]
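// `derive(Snafu)` generates the `Display`/`Error` impls and the context
// selectors (e.g. `BuildRequestError`) used with `.context(...)` above; the
// `visibility` attribute below widens those selectors to `pub(crate)` so the
// operation functions in the parent module can construct them.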
#[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } } pub mod log_analytics { use crate::models::*; use reqwest::StatusCode; use snafu::{ResultExt, Snafu}; pub async fn export_request_rate_by_interval( operation_config: &crate::OperationConfig, parameters: &RequestRateByIntervalInput, location: &str, subscription_id: &str, ) -> std::result::Result<export_request_rate_by_interval::Response, export_request_rate_by_interval::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/logAnalytics/apiAccess/getRequestRateByInterval", &operation_config.base_path, subscription_id, location ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(export_request_rate_by_interval::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(parameters); let req = req_builder.build().context(export_request_rate_by_interval::BuildRequestError)?; let rsp = client .execute(req) .await .context(export_request_rate_by_interval::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(export_request_rate_by_interval::ResponseBytesError)?; let rsp_value: LogAnalyticsOperationResult = serde_json::from_slice(&body).context(export_request_rate_by_interval::DeserializeError { body })?; Ok(export_request_rate_by_interval::Response::Ok200(rsp_value)) } StatusCode::ACCEPTED => Ok(export_request_rate_by_interval::Response::Accepted202), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(export_request_rate_by_interval::ResponseBytesError)?; export_request_rate_by_interval::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod export_request_rate_by_interval { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(LogAnalyticsOperationResult), Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn export_throttled_requests( operation_config: &crate::OperationConfig, parameters: &ThrottledRequestsInput, location: &str, subscription_id: &str, ) -> std::result::Result<export_throttled_requests::Response, export_throttled_requests::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/logAnalytics/apiAccess/getThrottledRequests", &operation_config.base_path, subscription_id, location ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let 
token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(export_throttled_requests::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(parameters); let req = req_builder.build().context(export_throttled_requests::BuildRequestError)?; let rsp = client.execute(req).await.context(export_throttled_requests::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(export_throttled_requests::ResponseBytesError)?; let rsp_value: LogAnalyticsOperationResult = serde_json::from_slice(&body).context(export_throttled_requests::DeserializeError { body })?; Ok(export_throttled_requests::Response::Ok200(rsp_value)) } StatusCode::ACCEPTED => Ok(export_throttled_requests::Response::Accepted202), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(export_throttled_requests::ResponseBytesError)?; export_throttled_requests::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod export_throttled_requests { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(LogAnalyticsOperationResult), Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } } pub mod virtual_machine_run_commands { use crate::models::*; use reqwest::StatusCode; use snafu::{ResultExt, Snafu}; pub async fn list( operation_config: &crate::OperationConfig, location: &str, subscription_id: &str, ) -> std::result::Result<RunCommandListResult, list::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/runCommands", &operation_config.base_path, subscription_id, location ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list::BuildRequestError)?; let rsp = client.execute(req).await.context(list::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; let rsp_value: RunCommandListResult = serde_json::from_slice(&body).context(list::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; list::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod list { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: 
reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn get( operation_config: &crate::OperationConfig, location: &str, command_id: &str, subscription_id: &str, ) -> std::result::Result<RunCommandDocument, get::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/providers/Microsoft.Compute/locations/{}/runCommands/{}", &operation_config.base_path, subscription_id, location, command_id ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(get::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(get::BuildRequestError)?; let rsp = client.execute(req).await.context(get::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; let rsp_value: RunCommandDocument = serde_json::from_slice(&body).context(get::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; get::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod get { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } } pub mod resource_skus { use crate::models::*; use reqwest::StatusCode; use snafu::{ResultExt, Snafu}; pub async fn list( operation_config: &crate::OperationConfig, subscription_id: &str, ) -> std::result::Result<ResourceSkusResult, list::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/providers/Microsoft.Compute/skus", &operation_config.base_path, subscription_id ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list::BuildRequestError)?; let rsp = client.execute(req).await.context(list::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; let rsp_value: ResourceSkusResult = serde_json::from_slice(&body).context(list::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; list::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod list { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, 
body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } } pub mod disks { use crate::models::*; use reqwest::StatusCode; use snafu::{ResultExt, Snafu}; pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, disk_name: &str, ) -> std::result::Result<Disk, get::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/disks/{}", &operation_config.base_path, subscription_id, resource_group_name, disk_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(get::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(get::BuildRequestError)?; let rsp = client.execute(req).await.context(get::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; let rsp_value: Disk = serde_json::from_slice(&body).context(get::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; get::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod get { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn create_or_update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, disk_name: &str, disk: &Disk, ) -> std::result::Result<create_or_update::Response, create_or_update::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/disks/{}", &operation_config.base_path, subscription_id, resource_group_name, disk_name ); let mut req_builder = client.put(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(create_or_update::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(disk); let req = req_builder.build().context(create_or_update::BuildRequestError)?; let rsp = client.execute(req).await.context(create_or_update::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: Disk = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; 
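// Both the 200 and 202 arms deserialize a `Disk`: 200 carries the final
// resource, while 202 carries the provisioning-in-progress view of the same
// resource, as is typical for ARM long-running operations.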
Ok(create_or_update::Response::Ok200(rsp_value)) } StatusCode::ACCEPTED => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: Disk = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; Ok(create_or_update::Response::Accepted202(rsp_value)) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; create_or_update::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod create_or_update { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(Disk), Accepted202(Disk), } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, disk_name: &str, disk: &DiskUpdate, ) -> std::result::Result<update::Response, update::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/disks/{}", &operation_config.base_path, subscription_id, resource_group_name, disk_name ); let mut req_builder = client.patch(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(update::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(disk); let req = req_builder.build().context(update::BuildRequestError)?; let rsp = client.execute(req).await.context(update::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?; let rsp_value: Disk = serde_json::from_slice(&body).context(update::DeserializeError { body })?; Ok(update::Response::Ok200(rsp_value)) } StatusCode::ACCEPTED => { let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?; let rsp_value: Disk = serde_json::from_slice(&body).context(update::DeserializeError { body })?; Ok(update::Response::Accepted202(rsp_value)) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?; update::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod update { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(Disk), Accepted202(Disk), } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, disk_name: &str, ) -> std::result::Result<delete::Response, 
delete::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/disks/{}", &operation_config.base_path, subscription_id, resource_group_name, disk_name ); let mut req_builder = client.delete(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(delete::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(delete::BuildRequestError)?; let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(delete::Response::Ok200), StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?; delete::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod delete { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn list_by_resource_group( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, ) -> std::result::Result<DiskList, list_by_resource_group::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/disks", &operation_config.base_path, subscription_id, resource_group_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list_by_resource_group::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list_by_resource_group::BuildRequestError)?; let rsp = client.execute(req).await.context(list_by_resource_group::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list_by_resource_group::ResponseBytesError)?; let rsp_value: DiskList = serde_json::from_slice(&body).context(list_by_resource_group::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list_by_resource_group::ResponseBytesError)?; list_by_resource_group::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod list_by_resource_group { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { 
source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn list(operation_config: &crate::OperationConfig, subscription_id: &str) -> std::result::Result<DiskList, list::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/providers/Microsoft.Compute/disks", &operation_config.base_path, subscription_id ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list::BuildRequestError)?; let rsp = client.execute(req).await.context(list::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; let rsp_value: DiskList = serde_json::from_slice(&body).context(list::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; list::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod list { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn grant_access( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, disk_name: &str, grant_access_data: &GrantAccessData, ) -> std::result::Result<grant_access::Response, grant_access::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/disks/{}/beginGetAccess", &operation_config.base_path, subscription_id, resource_group_name, disk_name ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(grant_access::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(grant_access_data); let req = req_builder.build().context(grant_access::BuildRequestError)?; let rsp = client.execute(req).await.context(grant_access::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(grant_access::ResponseBytesError)?; let rsp_value: AccessUri = serde_json::from_slice(&body).context(grant_access::DeserializeError { body })?; Ok(grant_access::Response::Ok200(rsp_value)) } StatusCode::ACCEPTED => Ok(grant_access::Response::Accepted202), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(grant_access::ResponseBytesError)?; grant_access::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod grant_access { use 
crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(AccessUri), Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn revoke_access( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, disk_name: &str, ) -> std::result::Result<revoke_access::Response, revoke_access::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/disks/{}/endGetAccess", &operation_config.base_path, subscription_id, resource_group_name, disk_name ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(revoke_access::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.header(reqwest::header::CONTENT_LENGTH, 0); let req = req_builder.build().context(revoke_access::BuildRequestError)?; let rsp = client.execute(req).await.context(revoke_access::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(revoke_access::Response::Ok200), StatusCode::ACCEPTED => Ok(revoke_access::Response::Accepted202), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(revoke_access::ResponseBytesError)?; revoke_access::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod revoke_access { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } } pub mod snapshots { use crate::models::*; use reqwest::StatusCode; use snafu::{ResultExt, Snafu}; pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, snapshot_name: &str, ) -> std::result::Result<Snapshot, get::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/snapshots/{}", &operation_config.base_path, subscription_id, resource_group_name, snapshot_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(get::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(get::BuildRequestError)?; let rsp = 
client.execute(req).await.context(get::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; let rsp_value: Snapshot = serde_json::from_slice(&body).context(get::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; get::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod get { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn create_or_update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, snapshot_name: &str, snapshot: &Snapshot, ) -> std::result::Result<create_or_update::Response, create_or_update::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/snapshots/{}", &operation_config.base_path, subscription_id, resource_group_name, snapshot_name ); let mut req_builder = client.put(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(create_or_update::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(snapshot); let req = req_builder.build().context(create_or_update::BuildRequestError)?; let rsp = client.execute(req).await.context(create_or_update::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: Snapshot = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; Ok(create_or_update::Response::Ok200(rsp_value)) } StatusCode::ACCEPTED => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: Snapshot = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; Ok(create_or_update::Response::Accepted202(rsp_value)) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; create_or_update::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod create_or_update { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(Snapshot), Accepted202(Snapshot), } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, snapshot_name: &str, snapshot: 
&SnapshotUpdate, ) -> std::result::Result<update::Response, update::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/snapshots/{}", &operation_config.base_path, subscription_id, resource_group_name, snapshot_name ); let mut req_builder = client.patch(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(update::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(snapshot); let req = req_builder.build().context(update::BuildRequestError)?; let rsp = client.execute(req).await.context(update::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?; let rsp_value: Snapshot = serde_json::from_slice(&body).context(update::DeserializeError { body })?; Ok(update::Response::Ok200(rsp_value)) } StatusCode::ACCEPTED => { let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?; let rsp_value: Snapshot = serde_json::from_slice(&body).context(update::DeserializeError { body })?; Ok(update::Response::Accepted202(rsp_value)) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(update::ResponseBytesError)?; update::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod update { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(Snapshot), Accepted202(Snapshot), } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, snapshot_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/snapshots/{}", &operation_config.base_path, subscription_id, resource_group_name, snapshot_name ); let mut req_builder = client.delete(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(delete::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(delete::BuildRequestError)?; let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(delete::Response::Ok200), StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?; delete::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod delete { use crate::{models, models::*}; use 
reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn list_by_resource_group( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, ) -> std::result::Result<SnapshotList, list_by_resource_group::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/snapshots", &operation_config.base_path, subscription_id, resource_group_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list_by_resource_group::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list_by_resource_group::BuildRequestError)?; let rsp = client.execute(req).await.context(list_by_resource_group::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list_by_resource_group::ResponseBytesError)?; let rsp_value: SnapshotList = serde_json::from_slice(&body).context(list_by_resource_group::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list_by_resource_group::ResponseBytesError)?; list_by_resource_group::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod list_by_resource_group { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn list(operation_config: &crate::OperationConfig, subscription_id: &str) -> std::result::Result<SnapshotList, list::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/providers/Microsoft.Compute/snapshots", &operation_config.base_path, subscription_id ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list::BuildRequestError)?; let rsp = client.execute(req).await.context(list::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; let rsp_value: SnapshotList = 
serde_json::from_slice(&body).context(list::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; list::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod list { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn grant_access( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, snapshot_name: &str, grant_access_data: &GrantAccessData, ) -> std::result::Result<grant_access::Response, grant_access::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/snapshots/{}/beginGetAccess", &operation_config.base_path, subscription_id, resource_group_name, snapshot_name ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(grant_access::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(grant_access_data); let req = req_builder.build().context(grant_access::BuildRequestError)?; let rsp = client.execute(req).await.context(grant_access::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(grant_access::ResponseBytesError)?; let rsp_value: AccessUri = serde_json::from_slice(&body).context(grant_access::DeserializeError { body })?; Ok(grant_access::Response::Ok200(rsp_value)) } StatusCode::ACCEPTED => Ok(grant_access::Response::Accepted202), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(grant_access::ResponseBytesError)?; grant_access::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod grant_access { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(AccessUri), Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn revoke_access( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, snapshot_name: &str, ) -> std::result::Result<revoke_access::Response, revoke_access::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/snapshots/{}/endGetAccess", &operation_config.base_path, subscription_id, resource_group_name, snapshot_name ); let mut req_builder = client.post(uri_str); if let Some(token_credential) = &operation_config.token_credential { let 
token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(revoke_access::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.header(reqwest::header::CONTENT_LENGTH, 0); let req = req_builder.build().context(revoke_access::BuildRequestError)?; let rsp = client.execute(req).await.context(revoke_access::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(revoke_access::Response::Ok200), StatusCode::ACCEPTED => Ok(revoke_access::Response::Accepted202), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(revoke_access::ResponseBytesError)?; revoke_access::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod revoke_access { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } } pub mod galleries { use crate::models::*; use reqwest::StatusCode; use snafu::{ResultExt, Snafu}; pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, gallery_name: &str, ) -> std::result::Result<Gallery, get::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/galleries/{}", &operation_config.base_path, subscription_id, resource_group_name, gallery_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(get::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(get::BuildRequestError)?; let rsp = client.execute(req).await.context(get::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; let rsp_value: Gallery = serde_json::from_slice(&body).context(get::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; let rsp_value: CloudError = serde_json::from_slice(&body).context(get::DeserializeError { body })?; get::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod get { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: StatusCode, value: models::CloudError, }, BuildRequestError { source: reqwest::Error, }, ExecuteRequestError { source: reqwest::Error, }, ResponseBytesError { source: reqwest::Error, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn create_or_update( operation_config: &crate::OperationConfig, subscription_id: 
&str, resource_group_name: &str, gallery_name: &str, gallery: &Gallery, ) -> std::result::Result<create_or_update::Response, create_or_update::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/galleries/{}", &operation_config.base_path, subscription_id, resource_group_name, gallery_name ); let mut req_builder = client.put(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(create_or_update::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(gallery); let req = req_builder.build().context(create_or_update::BuildRequestError)?; let rsp = client.execute(req).await.context(create_or_update::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: Gallery = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; Ok(create_or_update::Response::Ok200(rsp_value)) } StatusCode::CREATED => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: Gallery = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; Ok(create_or_update::Response::Created201(rsp_value)) } StatusCode::ACCEPTED => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: Gallery = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; Ok(create_or_update::Response::Accepted202(rsp_value)) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: CloudError = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; create_or_update::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod create_or_update { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(Gallery), Created201(Gallery), Accepted202(Gallery), } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: StatusCode, value: models::CloudError, }, BuildRequestError { source: reqwest::Error, }, ExecuteRequestError { source: reqwest::Error, }, ResponseBytesError { source: reqwest::Error, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, gallery_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/galleries/{}", &operation_config.base_path, subscription_id, resource_group_name, gallery_name ); let mut req_builder = client.delete(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(delete::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = 
req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(delete::BuildRequestError)?; let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(delete::Response::Ok200), StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?; let rsp_value: CloudError = serde_json::from_slice(&body).context(delete::DeserializeError { body })?; delete::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod delete { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: StatusCode, value: models::CloudError, }, BuildRequestError { source: reqwest::Error, }, ExecuteRequestError { source: reqwest::Error, }, ResponseBytesError { source: reqwest::Error, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn list_by_resource_group( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, ) -> std::result::Result<GalleryList, list_by_resource_group::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/galleries", &operation_config.base_path, subscription_id, resource_group_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list_by_resource_group::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list_by_resource_group::BuildRequestError)?; let rsp = client.execute(req).await.context(list_by_resource_group::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list_by_resource_group::ResponseBytesError)?; let rsp_value: GalleryList = serde_json::from_slice(&body).context(list_by_resource_group::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list_by_resource_group::ResponseBytesError)?; let rsp_value: CloudError = serde_json::from_slice(&body).context(list_by_resource_group::DeserializeError { body })?; list_by_resource_group::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod list_by_resource_group { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: StatusCode, value: models::CloudError, }, BuildRequestError { source: reqwest::Error, }, ExecuteRequestError { source: reqwest::Error, }, ResponseBytesError { source: reqwest::Error, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn list(operation_config: &crate::OperationConfig, subscription_id: &str) -> std::result::Result<GalleryList, list::Error> { let client = 
&operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/providers/Microsoft.Compute/galleries", &operation_config.base_path, subscription_id ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list::BuildRequestError)?; let rsp = client.execute(req).await.context(list::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; let rsp_value: GalleryList = serde_json::from_slice(&body).context(list::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; let rsp_value: CloudError = serde_json::from_slice(&body).context(list::DeserializeError { body })?; list::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod list { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: StatusCode, value: models::CloudError, }, BuildRequestError { source: reqwest::Error, }, ExecuteRequestError { source: reqwest::Error, }, ResponseBytesError { source: reqwest::Error, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } } pub mod gallery_images { use crate::models::*; use reqwest::StatusCode; use snafu::{ResultExt, Snafu}; pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, gallery_name: &str, gallery_image_name: &str, ) -> std::result::Result<GalleryImage, get::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/galleries/{}/images/{}", &operation_config.base_path, subscription_id, resource_group_name, gallery_name, gallery_image_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(get::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(get::BuildRequestError)?; let rsp = client.execute(req).await.context(get::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; let rsp_value: GalleryImage = serde_json::from_slice(&body).context(get::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; let rsp_value: CloudError = serde_json::from_slice(&body).context(get::DeserializeError { body })?; get::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod get { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: StatusCode, value: 
models::CloudError, }, BuildRequestError { source: reqwest::Error, }, ExecuteRequestError { source: reqwest::Error, }, ResponseBytesError { source: reqwest::Error, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn create_or_update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, gallery_name: &str, gallery_image_name: &str, gallery_image: &GalleryImage, ) -> std::result::Result<create_or_update::Response, create_or_update::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/galleries/{}/images/{}", &operation_config.base_path, subscription_id, resource_group_name, gallery_name, gallery_image_name ); let mut req_builder = client.put(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(create_or_update::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(gallery_image); let req = req_builder.build().context(create_or_update::BuildRequestError)?; let rsp = client.execute(req).await.context(create_or_update::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: GalleryImage = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; Ok(create_or_update::Response::Ok200(rsp_value)) } StatusCode::CREATED => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: GalleryImage = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; Ok(create_or_update::Response::Created201(rsp_value)) } StatusCode::ACCEPTED => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: GalleryImage = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; Ok(create_or_update::Response::Accepted202(rsp_value)) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: CloudError = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; create_or_update::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod create_or_update { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(GalleryImage), Created201(GalleryImage), Accepted202(GalleryImage), } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: StatusCode, value: models::CloudError, }, BuildRequestError { source: reqwest::Error, }, ExecuteRequestError { source: reqwest::Error, }, ResponseBytesError { source: reqwest::Error, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, gallery_name: &str, gallery_image_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let client = &operation_config.client; let uri_str = 
&format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/galleries/{}/images/{}", &operation_config.base_path, subscription_id, resource_group_name, gallery_name, gallery_image_name ); let mut req_builder = client.delete(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(delete::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(delete::BuildRequestError)?; let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(delete::Response::Ok200), StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?; let rsp_value: CloudError = serde_json::from_slice(&body).context(delete::DeserializeError { body })?; delete::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod delete { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: StatusCode, value: models::CloudError, }, BuildRequestError { source: reqwest::Error, }, ExecuteRequestError { source: reqwest::Error, }, ResponseBytesError { source: reqwest::Error, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn list_by_gallery( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, gallery_name: &str, ) -> std::result::Result<GalleryImageList, list_by_gallery::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/galleries/{}/images", &operation_config.base_path, subscription_id, resource_group_name, gallery_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list_by_gallery::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list_by_gallery::BuildRequestError)?; let rsp = client.execute(req).await.context(list_by_gallery::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list_by_gallery::ResponseBytesError)?; let rsp_value: GalleryImageList = serde_json::from_slice(&body).context(list_by_gallery::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list_by_gallery::ResponseBytesError)?; let rsp_value: CloudError = serde_json::from_slice(&body).context(list_by_gallery::DeserializeError { body })?; list_by_gallery::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod list_by_gallery { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub 
enum Error { DefaultResponse { status_code: StatusCode, value: models::CloudError, }, BuildRequestError { source: reqwest::Error, }, ExecuteRequestError { source: reqwest::Error, }, ResponseBytesError { source: reqwest::Error, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } } pub mod gallery_image_versions { use crate::models::*; use reqwest::StatusCode; use snafu::{ResultExt, Snafu}; pub async fn get( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, gallery_name: &str, gallery_image_name: &str, gallery_image_version_name: &str, expand: Option<&str>, ) -> std::result::Result<GalleryImageVersion, get::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/galleries/{}/images/{}/versions/{}", &operation_config.base_path, subscription_id, resource_group_name, gallery_name, gallery_image_name, gallery_image_version_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(get::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); if let Some(expand) = expand { req_builder = req_builder.query(&[("$expand", expand)]); } let req = req_builder.build().context(get::BuildRequestError)?; let rsp = client.execute(req).await.context(get::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; let rsp_value: GalleryImageVersion = serde_json::from_slice(&body).context(get::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; let rsp_value: CloudError = serde_json::from_slice(&body).context(get::DeserializeError { body })?; get::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod get { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: StatusCode, value: models::CloudError, }, BuildRequestError { source: reqwest::Error, }, ExecuteRequestError { source: reqwest::Error, }, ResponseBytesError { source: reqwest::Error, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn create_or_update( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, gallery_name: &str, gallery_image_name: &str, gallery_image_version_name: &str, gallery_image_version: &GalleryImageVersion, ) -> std::result::Result<create_or_update::Response, create_or_update::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/galleries/{}/images/{}/versions/{}", &operation_config.base_path, subscription_id, resource_group_name, gallery_name, gallery_image_name, gallery_image_version_name ); let mut req_builder = client.put(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await 
.context(create_or_update::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(gallery_image_version); let req = req_builder.build().context(create_or_update::BuildRequestError)?; let rsp = client.execute(req).await.context(create_or_update::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: GalleryImageVersion = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; Ok(create_or_update::Response::Ok200(rsp_value)) } StatusCode::CREATED => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: GalleryImageVersion = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; Ok(create_or_update::Response::Created201(rsp_value)) } StatusCode::ACCEPTED => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: GalleryImageVersion = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; Ok(create_or_update::Response::Accepted202(rsp_value)) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: CloudError = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; create_or_update::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod create_or_update { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(GalleryImageVersion), Created201(GalleryImageVersion), Accepted202(GalleryImageVersion), } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: StatusCode, value: models::CloudError, }, BuildRequestError { source: reqwest::Error, }, ExecuteRequestError { source: reqwest::Error, }, ResponseBytesError { source: reqwest::Error, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn delete( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, gallery_name: &str, gallery_image_name: &str, gallery_image_version_name: &str, ) -> std::result::Result<delete::Response, delete::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/galleries/{}/images/{}/versions/{}", &operation_config.base_path, subscription_id, resource_group_name, gallery_name, gallery_image_name, gallery_image_version_name ); let mut req_builder = client.delete(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(delete::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(delete::BuildRequestError)?; let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => Ok(delete::Response::Ok200), StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), StatusCode::NO_CONTENT => 
Ok(delete::Response::NoContent204), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?; let rsp_value: CloudError = serde_json::from_slice(&body).context(delete::DeserializeError { body })?; delete::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod delete { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200, Accepted202, NoContent204, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: StatusCode, value: models::CloudError, }, BuildRequestError { source: reqwest::Error, }, ExecuteRequestError { source: reqwest::Error, }, ResponseBytesError { source: reqwest::Error, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } pub async fn list_by_gallery_image( operation_config: &crate::OperationConfig, subscription_id: &str, resource_group_name: &str, gallery_name: &str, gallery_image_name: &str, ) -> std::result::Result<GalleryImageVersionList, list_by_gallery_image::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Compute/galleries/{}/images/{}/versions", &operation_config.base_path, subscription_id, resource_group_name, gallery_name, gallery_image_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list_by_gallery_image::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list_by_gallery_image::BuildRequestError)?; let rsp = client.execute(req).await.context(list_by_gallery_image::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list_by_gallery_image::ResponseBytesError)?; let rsp_value: GalleryImageVersionList = serde_json::from_slice(&body).context(list_by_gallery_image::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list_by_gallery_image::ResponseBytesError)?; let rsp_value: CloudError = serde_json::from_slice(&body).context(list_by_gallery_image::DeserializeError { body })?; list_by_gallery_image::DefaultResponse { status_code, value: rsp_value, } .fail() } } } pub mod list_by_gallery_image { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { DefaultResponse { status_code: StatusCode, value: models::CloudError, }, BuildRequestError { source: reqwest::Error, }, ExecuteRequestError { source: reqwest::Error, }, ResponseBytesError { source: reqwest::Error, }, DeserializeError { source: serde_json::Error, body: bytes::Bytes, }, GetTokenError { source: azure_core::errors::AzureError, }, } } } pub mod container_services { use crate::models::*; use reqwest::StatusCode; use snafu::{ResultExt, Snafu}; pub async fn list( operation_config: &crate::OperationConfig, subscription_id: &str, ) -> std::result::Result<ContainerServiceListResult, list::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/providers/Microsoft.ContainerService/containerServices", 
&operation_config.base_path, subscription_id ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list::BuildRequestError)?; let rsp = client.execute(req).await.context(list::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; let rsp_value: ContainerServiceListResult = serde_json::from_slice(&body).context(list::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list::ResponseBytesError)?; list::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod list { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn get( operation_config: &crate::OperationConfig, resource_group_name: &str, container_service_name: &str, subscription_id: &str, ) -> std::result::Result<ContainerService, get::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/containerServices/{}", &operation_config.base_path, subscription_id, resource_group_name, container_service_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(get::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(get::BuildRequestError)?; let rsp = client.execute(req).await.context(get::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; let rsp_value: ContainerService = serde_json::from_slice(&body).context(get::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(get::ResponseBytesError)?; get::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod get { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn create_or_update( operation_config: &crate::OperationConfig, resource_group_name: &str, container_service_name: &str, parameters: 
&ContainerService, subscription_id: &str, ) -> std::result::Result<create_or_update::Response, create_or_update::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/containerServices/{}", &operation_config.base_path, subscription_id, resource_group_name, container_service_name ); let mut req_builder = client.put(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(create_or_update::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); req_builder = req_builder.json(parameters); let req = req_builder.build().context(create_or_update::BuildRequestError)?; let rsp = client.execute(req).await.context(create_or_update::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: ContainerService = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; Ok(create_or_update::Response::Ok200(rsp_value)) } StatusCode::CREATED => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: ContainerService = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; Ok(create_or_update::Response::Created201(rsp_value)) } StatusCode::ACCEPTED => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; let rsp_value: ContainerService = serde_json::from_slice(&body).context(create_or_update::DeserializeError { body })?; Ok(create_or_update::Response::Accepted202(rsp_value)) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(create_or_update::ResponseBytesError)?; create_or_update::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod create_or_update { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Ok200(ContainerService), Created201(ContainerService), Accepted202(ContainerService), } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn delete( operation_config: &crate::OperationConfig, resource_group_name: &str, container_service_name: &str, subscription_id: &str, ) -> std::result::Result<delete::Response, delete::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/containerServices/{}", &operation_config.base_path, subscription_id, resource_group_name, container_service_name ); let mut req_builder = client.delete(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(delete::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", 
&operation_config.api_version)]); let req = req_builder.build().context(delete::BuildRequestError)?; let rsp = client.execute(req).await.context(delete::ExecuteRequestError)?; match rsp.status() { StatusCode::ACCEPTED => Ok(delete::Response::Accepted202), StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204), status_code => { let body: bytes::Bytes = rsp.bytes().await.context(delete::ResponseBytesError)?; delete::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod delete { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug)] pub enum Response { Accepted202, NoContent204, } #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } pub async fn list_by_resource_group( operation_config: &crate::OperationConfig, resource_group_name: &str, subscription_id: &str, ) -> std::result::Result<ContainerServiceListResult, list_by_resource_group::Error> { let client = &operation_config.client; let uri_str = &format!( "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService/containerServices", &operation_config.base_path, subscription_id, resource_group_name ); let mut req_builder = client.get(uri_str); if let Some(token_credential) = &operation_config.token_credential { let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(list_by_resource_group::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); } req_builder = req_builder.query(&[("api-version", &operation_config.api_version)]); let req = req_builder.build().context(list_by_resource_group::BuildRequestError)?; let rsp = client.execute(req).await.context(list_by_resource_group::ExecuteRequestError)?; match rsp.status() { StatusCode::OK => { let body: bytes::Bytes = rsp.bytes().await.context(list_by_resource_group::ResponseBytesError)?; let rsp_value: ContainerServiceListResult = serde_json::from_slice(&body).context(list_by_resource_group::DeserializeError { body })?; Ok(rsp_value) } status_code => { let body: bytes::Bytes = rsp.bytes().await.context(list_by_resource_group::ResponseBytesError)?; list_by_resource_group::UnexpectedResponse { status_code, body: body }.fail() } } } pub mod list_by_resource_group { use crate::{models, models::*}; use reqwest::StatusCode; use snafu::Snafu; #[derive(Debug, Snafu)] #[snafu(visibility(pub(crate)))] pub enum Error { UnexpectedResponse { status_code: StatusCode, body: bytes::Bytes }, BuildRequestError { source: reqwest::Error }, ExecuteRequestError { source: reqwest::Error }, ResponseBytesError { source: reqwest::Error }, DeserializeError { source: serde_json::Error, body: bytes::Bytes }, GetTokenError { source: azure_core::errors::AzureError }, } } }
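// Usage sketch (not part of the generated source): every operation above takes a
// `crate::OperationConfig` whose `client`, `base_path`, `api_version`,
// `token_credential`, and `token_credential_resource` fields drive request
// construction. Assuming such a config is in scope, listing snapshots could look
// like the commented example below; the `value` field on `SnapshotList` is an
// assumption here, following the usual Azure list-result shape.
//
// async fn print_snapshots(config: &crate::OperationConfig, subscription_id: &str) {
//     match snapshots::list(config, subscription_id).await {
//         // On success, iterate the returned page of snapshot models.
//         Ok(list) => {
//             for snapshot in list.value {
//                 println!("{:?}", snapshot);
//             }
//         }
//         // The generated `list::Error` enum distinguishes build, execute,
//         // token, and deserialization failures.
//         Err(err) => eprintln!("listing snapshots failed: {:?}", err),
//     }
// }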
{ let token_response = token_credential .get_token(&operation_config.token_credential_resource) .await .context(delete_instances::GetTokenError)?; req_builder = req_builder.bearer_auth(token_response.token.secret()); }
object_store.rs
//! CLI handling for object store config (via CLI arguments and environment variables). use std::{convert::TryFrom, fs, num::NonZeroUsize, path::PathBuf, time::Duration}; use futures::TryStreamExt; use object_store::{path::ObjectStorePath, DynObjectStore, ObjectStoreImpl, ThrottleConfig}; use observability_deps::tracing::{info, warn}; use snafu::{ResultExt, Snafu}; use uuid::Uuid; #[derive(Debug, Snafu)] pub enum ParseError { #[snafu(display("Unable to create database directory {:?}: {}", path, source))] CreatingDatabaseDirectory { path: PathBuf, source: std::io::Error, }, #[snafu(display( "Specified {:?} for the object store, required configuration missing for {}", object_store, missing ))] MissingObjectStoreConfig { object_store: ObjectStoreType, missing: String, }, // Creating a new S3 object store can fail if the region is *specified* but // not *parseable* as a rusoto `Region`. The other object store constructors // don't return `Result`. #[snafu(display("Error configuring Amazon S3: {}", source))] InvalidS3Config { source: object_store::Error }, #[snafu(display("Error configuring GCS: {}", source))] InvalidGCSConfig { source: object_store::Error }, #[snafu(display("Error configuring Microsoft Azure: {}", source))] InvalidAzureConfig { source: object_store::Error }, } /// The AWS region to use for Amazon S3 based object storage if none is /// specified. pub const FALLBACK_AWS_REGION: &str = "us-east-1"; /// CLI config for object stores. #[derive(Debug, Clone, clap::Parser)] pub struct ObjectStoreConfig { #[clap( arg_enum, long = "--object-store", env = "INFLUXDB_IOX_OBJECT_STORE", ignore_case = true, long_help = r#"Which object storage to use. If not specified, defaults to memory. Possible values (case insensitive): * memory (default): Effectively no object persistence. * memorythrottled: Like `memory` but with latency and throughput that somewhat resemble a cloud object store. Useful for testing and benchmarking. * file: Stores objects in the local filesystem. Must also set `--data-dir`. * s3: Amazon S3. Must also set `--bucket`, `--aws-access-key-id`, `--aws-secret-access-key`, and possibly `--aws-default-region`. * google: Google Cloud Storage. Must also set `--bucket` and `--google-service-account`. * azure: Microsoft Azure blob storage. Must also set `--bucket`, `--azure-storage-account`, and `--azure-storage-access-key`. "# )] pub object_store: Option<ObjectStoreType>, /// Name of the bucket to use for the object store. Must also set /// `--object-store` to a cloud object store to have any effect. /// /// If using Google Cloud Storage for the object store, this item as well /// as `--google-service-account` must be set. /// /// If using S3 for the object store, must set this item as well /// as `--aws-access-key-id` and `--aws-secret-access-key`. Can also set /// `--aws-default-region` if not using the fallback region. /// /// If using Azure for the object store, set this item to the name of a /// container you've created in the associated storage account, under /// Blob Service > Containers. Must also set `--azure-storage-account` and /// `--azure-storage-access-key`. #[clap(long = "--bucket", env = "INFLUXDB_IOX_BUCKET")] pub bucket: Option<String>, /// The location InfluxDB IOx will use to store files locally. #[clap(long = "--data-dir", env = "INFLUXDB_IOX_DB_DIR")] pub database_directory: Option<PathBuf>, /// When using Amazon S3 as the object store, set this to an access key that /// has permission to read from and write to the specified S3 bucket.
    ///
    /// Must also set `--object-store=s3`, `--bucket`, and
    /// `--aws-secret-access-key`. Can also set `--aws-default-region` if not
    /// using the fallback region.
    ///
    /// Prefer the environment variable over the command line flag in shared
    /// environments.
    #[clap(long = "--aws-access-key-id", env = "AWS_ACCESS_KEY_ID")]
    pub aws_access_key_id: Option<String>,

    /// When using Amazon S3 as the object store, set this to the secret access
    /// key that goes with the specified access key ID.
    ///
    /// Must also set `--object-store=s3`, `--bucket`, `--aws-access-key-id`.
    /// Can also set `--aws-default-region` if not using the fallback region.
    ///
    /// Prefer the environment variable over the command line flag in shared
    /// environments.
    #[clap(long = "--aws-secret-access-key", env = "AWS_SECRET_ACCESS_KEY")]
    pub aws_secret_access_key: Option<String>,

    /// When using Amazon S3 as the object store, set this to the region
    /// that goes with the specified bucket if different from the fallback
    /// value.
    ///
    /// Must also set `--object-store=s3`, `--bucket`, `--aws-access-key-id`,
    /// and `--aws-secret-access-key`.
    #[clap(
        long = "--aws-default-region",
        env = "AWS_DEFAULT_REGION",
        default_value = FALLBACK_AWS_REGION,
    )]
    pub aws_default_region: String,

    /// When using an Amazon S3-compatible storage service, set this to the
    /// endpoint.
    ///
    /// Must also set `--object-store=s3` and `--bucket`. Can also set
    /// `--aws-default-region` if not using the fallback region.
    ///
    /// Prefer the environment variable over the command line flag in shared
    /// environments.
    #[clap(long = "--aws-endpoint", env = "AWS_ENDPOINT")]
    pub aws_endpoint: Option<String>,

    /// When using Amazon S3 as an object store, set this to the session token. This is handy when using a federated
    /// login / SSO and you fetch credentials via the UI.
    ///
    /// It is assumed that the session is valid for as long as the IOx server is running.
    ///
    /// Prefer the environment variable over the command line flag in shared
    /// environments.
    #[clap(long = "--aws-session-token", env = "AWS_SESSION_TOKEN")]
    pub aws_session_token: Option<String>,

    /// Allow unencrypted HTTP connections to AWS.
    #[clap(long = "--aws-allow-http", env = "AWS_ALLOW_HTTP")]
    pub aws_allow_http: bool,

    /// When using Google Cloud Storage as the object store, set this to the
    /// path to the JSON file that contains the Google credentials.
    ///
    /// Must also set `--object-store=google` and `--bucket`.
    #[clap(long = "--google-service-account", env = "GOOGLE_SERVICE_ACCOUNT")]
    pub google_service_account: Option<String>,

    /// When using Microsoft Azure as the object store, set this to the
    /// name you see when going to All Services > Storage accounts > `[name]`.
    ///
    /// Must also set `--object-store=azure`, `--bucket`, and
    /// `--azure-storage-access-key`.
    #[clap(long = "--azure-storage-account", env = "AZURE_STORAGE_ACCOUNT")]
    pub azure_storage_account: Option<String>,

    /// When using Microsoft Azure as the object store, set this to one of the
    /// Key values in the Storage account's Settings > Access keys.
    ///
    /// Must also set `--object-store=azure`, `--bucket`, and
    /// `--azure-storage-account`.
    ///
    /// Prefer the environment variable over the command line flag in shared
    /// environments.
    #[clap(long = "--azure-storage-access-key", env = "AZURE_STORAGE_ACCESS_KEY")]
    pub azure_storage_access_key: Option<String>,

    /// When using a network-based object store, limit the number of connections to this value.
#[clap( long = "--object-store-connection-limit", env = "OBJECT_STORE_CONNECTION_LIMIT", default_value = "16" )] pub object_store_connection_limit: NonZeroUsize, } #[derive(Debug, Copy, Clone, PartialEq, clap::ArgEnum)] pub enum ObjectStoreType { Memory, MemoryThrottled, File, S3, Google, Azure, } pub fn warn_about_inmem_store(config: &ObjectStoreConfig) { match config.object_store { Some(ObjectStoreType::Memory) | None => { warn!("NO PERSISTENCE: using Memory for object storage"); } Some(store) => { info!("Using {:?} for object storage", store); } } } impl TryFrom<&ObjectStoreConfig> for ObjectStoreImpl { type Error = ParseError; fn try_from(config: &ObjectStoreConfig) -> Result<Self, Self::Error> { match config.object_store { Some(ObjectStoreType::Memory) | None => Ok(Self::new_in_memory()), Some(ObjectStoreType::MemoryThrottled) => { let config = ThrottleConfig { // for every call: assume a 100ms latency wait_delete_per_call: Duration::from_millis(100), wait_get_per_call: Duration::from_millis(100), wait_list_per_call: Duration::from_millis(100), wait_list_with_delimiter_per_call: Duration::from_millis(100), wait_put_per_call: Duration::from_millis(100), // for list operations: assume we need 1 call per 1k entries at 100ms wait_list_per_entry: Duration::from_millis(100) / 1_000, wait_list_with_delimiter_per_entry: Duration::from_millis(100) / 1_000, // for upload/download: assume 1GByte/s wait_get_per_byte: Duration::from_secs(1) / 1_000_000_000, }; Ok(Self::new_in_memory_throttled(config)) } Some(ObjectStoreType::Google) => { match ( config.bucket.as_ref(), config.google_service_account.as_ref(), ) { (Some(bucket), Some(service_account)) => { Self::new_google_cloud_storage(service_account, bucket) .context(InvalidGCSConfigSnafu) } (bucket, service_account) => { let mut missing_args = vec![]; if bucket.is_none() { missing_args.push("bucket"); } if service_account.is_none() { missing_args.push("google-service-account"); } MissingObjectStoreConfigSnafu { object_store: ObjectStoreType::Google, missing: missing_args.join(", "), } .fail() } } } Some(ObjectStoreType::S3) => { match ( config.bucket.as_ref(), config.aws_access_key_id.as_ref(), config.aws_secret_access_key.as_ref(), config.aws_default_region.as_str(), config.aws_endpoint.as_ref(), config.aws_session_token.as_ref(), ) { (Some(bucket), key_id, secret_key, region, endpoint, session_token) => { Self::new_amazon_s3( key_id, secret_key, region, bucket, endpoint, session_token, config.object_store_connection_limit, config.aws_allow_http, ) .context(InvalidS3ConfigSnafu) } (bucket, _, _, _, _, _) => { let mut missing_args = vec![]; if bucket.is_none() { missing_args.push("bucket"); } MissingObjectStoreConfigSnafu { object_store: ObjectStoreType::S3, missing: missing_args.join(", "), } .fail() } } } Some(ObjectStoreType::Azure) => { match ( config.bucket.as_ref(), config.azure_storage_account.as_ref(), config.azure_storage_access_key.as_ref(), ) { (Some(bucket), Some(storage_account), Some(access_key)) => { Self::new_microsoft_azure(storage_account, access_key, bucket, false) .context(InvalidAzureConfigSnafu) } (bucket, storage_account, access_key) => { let mut missing_args = vec![]; if bucket.is_none() { missing_args.push("bucket"); } if storage_account.is_none() { missing_args.push("azure-storage-account"); } if access_key.is_none() { missing_args.push("azure-storage-access-key"); } MissingObjectStoreConfigSnafu { object_store: ObjectStoreType::Azure, missing: missing_args.join(", "), } .fail() } } } Some(ObjectStoreType::File) => 
match config.database_directory.as_ref() {
                Some(db_dir) => {
                    fs::create_dir_all(db_dir)
                        .context(CreatingDatabaseDirectorySnafu { path: db_dir })?;
                    Ok(Self::new_file(&db_dir))
                }
                None => MissingObjectStoreConfigSnafu {
                    object_store: ObjectStoreType::File,
                    missing: "data-dir",
                }
                .fail(),
            },
        }
    }
}

#[derive(Debug, Snafu)]
pub enum CheckError {
    #[snafu(display("Cannot read from object store: {}", source))]
    CannotReadObjectStore { source: object_store::Error },
}

/// Check if the object store is properly configured and readable.
///
/// Note: This does NOT test whether the object store is writable!
pub async fn check_object_store(object_store: &DynObjectStore) -> Result<(), CheckError> {
    // Use some prefix that will very likely end in an empty result, so we don't pull too much actual data here.
    let uuid = Uuid::new_v4().to_string();
    let mut prefix = object_store.new_path();
    prefix.push_dir(&uuid);

    // create stream (this might fail if the store is not readable)
    let mut stream = object_store
        .list(Some(&prefix))
        .await
        .context(CannotReadObjectStoreSnafu)?;

    // ... but sometimes it fails only if we use the resulting stream, so try that once
    stream
        .try_next()
        .await
        .context(CannotReadObjectStoreSnafu)?;

    // store seems to be readable
    Ok(())
}

#[cfg(test)]
mod tests {
    use clap::StructOpt;
    use object_store::ObjectStoreIntegration;
    use tempfile::TempDir;

    use super::*;

    #[test]
    fn default_object_store_is_memory() {
        let config = ObjectStoreConfig::try_parse_from(&["server"]).unwrap();

        let object_store = ObjectStoreImpl::try_from(&config).unwrap();
        let ObjectStoreImpl { integration, .. } = object_store;

        assert!(matches!(integration, ObjectStoreIntegration::InMemory(_)));
    }

    #[test]
    fn explicitly_set_object_store_to_memory() {
        let config =
            ObjectStoreConfig::try_parse_from(&["server", "--object-store", "memory"]).unwrap();

        let object_store = ObjectStoreImpl::try_from(&config).unwrap();
        let ObjectStoreImpl { integration, .. } = object_store;

        assert!(matches!(integration, ObjectStoreIntegration::InMemory(_)));
    }

    #[test]
    #[cfg(feature = "aws")]
    fn valid_s3_config() {
        let config = ObjectStoreConfig::try_parse_from(&[
            "server",
            "--object-store",
            "s3",
            "--bucket",
            "mybucket",
            "--aws-access-key-id",
            "NotARealAWSAccessKey",
            "--aws-secret-access-key",
            "NotARealAWSSecretAccessKey",
        ])
        .unwrap();

        let object_store = ObjectStoreImpl::try_from(&config).unwrap();
        let ObjectStoreImpl { integration, .. } = object_store;

        assert!(matches!(integration, ObjectStoreIntegration::AmazonS3(_)));
    }

    #[test]
    fn
() {
        let mut config =
            ObjectStoreConfig::try_parse_from(&["server", "--object-store", "s3"]).unwrap();

        // clean out eventual leaks via env variables
        config.bucket = None;

        let err = ObjectStoreImpl::try_from(&config).unwrap_err().to_string();

        assert_eq!(
            err,
            "Specified S3 for the object store, required configuration missing for bucket"
        );
    }

    #[test]
    #[cfg(feature = "gcp")]
    fn valid_google_config() {
        let config = ObjectStoreConfig::try_parse_from(&[
            "server",
            "--object-store",
            "google",
            "--bucket",
            "mybucket",
            "--google-service-account",
            "~/Not/A/Real/path.json",
        ])
        .unwrap();

        let object_store = ObjectStoreImpl::try_from(&config).unwrap();
        let ObjectStoreImpl { integration, .. } = object_store;

        assert!(matches!(
            integration,
            ObjectStoreIntegration::GoogleCloudStorage(_)
        ));
    }

    #[test]
    fn google_config_missing_params() {
        let mut config =
            ObjectStoreConfig::try_parse_from(&["server", "--object-store", "google"]).unwrap();

        // clean out eventual leaks via env variables
        config.bucket = None;

        let err = ObjectStoreImpl::try_from(&config).unwrap_err().to_string();

        assert_eq!(
            err,
            "Specified Google for the object store, required configuration missing for \
            bucket, google-service-account"
        );
    }

    #[test]
    #[cfg(feature = "azure")]
    fn valid_azure_config() {
        let config = ObjectStoreConfig::try_parse_from(&[
            "server",
            "--object-store",
            "azure",
            "--bucket",
            "mybucket",
            "--azure-storage-account",
            "NotARealStorageAccount",
            "--azure-storage-access-key",
            "NotARealKey",
        ])
        .unwrap();

        let object_store = ObjectStoreImpl::try_from(&config).unwrap();
        let ObjectStoreImpl { integration, .. } = object_store;

        assert!(matches!(
            integration,
            ObjectStoreIntegration::MicrosoftAzure(_)
        ));
    }

    #[test]
    fn azure_config_missing_params() {
        let mut config =
            ObjectStoreConfig::try_parse_from(&["server", "--object-store", "azure"]).unwrap();

        // clean out eventual leaks via env variables
        config.bucket = None;

        let err = ObjectStoreImpl::try_from(&config).unwrap_err().to_string();

        assert_eq!(
            err,
            "Specified Azure for the object store, required configuration missing for \
            bucket, azure-storage-account, azure-storage-access-key"
        );
    }

    #[test]
    fn valid_file_config() {
        let root = TempDir::new().unwrap();

        let config = ObjectStoreConfig::try_parse_from(&[
            "server",
            "--object-store",
            "file",
            "--data-dir",
            root.path().to_str().unwrap(),
        ])
        .unwrap();

        let object_store = ObjectStoreImpl::try_from(&config).unwrap();
        let ObjectStoreImpl { integration, .. } = object_store;

        assert!(matches!(integration, ObjectStoreIntegration::File(_)));
    }

    #[test]
    fn file_config_missing_params() {
        let config =
            ObjectStoreConfig::try_parse_from(&["server", "--object-store", "file"]).unwrap();

        let err = ObjectStoreImpl::try_from(&config).unwrap_err().to_string();

        assert_eq!(
            err,
            "Specified File for the object store, required configuration missing for \
            data-dir"
        );
    }
}
s3_config_missing_params
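A note on the configuration matrix above: each backend arm only checks the arguments without which a client cannot be constructed, and reports the rest through `MissingObjectStoreConfig`. As an illustrative restatement (in Python, to keep all added examples in this dump in one language), the required-flag table and the error string asserted by the tests can be sketched as follows; the `missing_flags` helper is hypothetical, not part of the crate:

# Sketch of the per-backend required-argument table encoded by the
# TryFrom impl above; backend and flag names mirror the Rust code.
REQUIRED_FLAGS = {
    "memory": [],
    "memorythrottled": [],
    "file": ["data-dir"],
    "s3": ["bucket"],
    "google": ["bucket", "google-service-account"],
    "azure": ["bucket", "azure-storage-account", "azure-storage-access-key"],
}

def missing_flags(store: str, provided: dict) -> list:
    """Return the required flags that were not provided for this backend."""
    return [flag for flag in REQUIRED_FLAGS[store] if not provided.get(flag)]

# Mirrors the error string asserted by azure_config_missing_params:
missing = missing_flags("azure", {"bucket": "mybucket"})
print("required configuration missing for " + ", ".join(missing))
# -> required configuration missing for azure-storage-account, azure-storage-access-key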
helper.ts
import { Token } from "moo";
import { Diagnostic, DiagnosticSeverity, Range } from "vscode-languageserver";

export const tokenToDiagnostic = (token: Token, message: string, severity?: DiagnosticSeverity): Diagnostic => {
  let line = token.line - 1;
  let character = token.col - 1;
  let range: Range = {
    start: { line, character },
    end: { line, character: character + token.toString().length },
  };
  console.log(JSON.stringify(range));
  return { message, range, severity: severity ? severity : DiagnosticSeverity.Error };
};

export const eolTokensToDiagnostic = (tokens: Token[]): Diagnostic | undefined => {
  if (tokens.filter((tkn: Token) => tkn.type != "space").length >= 1) {
    return tokensToDiagnostic(tokens, "New line expected.");
  }
  return undefined;
};

export const tokensToDiagnostic = (tokens: Token[], message: string): Diagnostic | undefined => {
  if (tokens.length >= 1) {
    let firstTkn = tokens[0];
    let lastTkn = tokens[tokens.length - 1];
    let range: Range = {
      start: { line: firstTkn.line - 1, character: firstTkn.col - 1 },
      end: { line: lastTkn.line - 1, character: lastTkn.col - 1 + lastTkn.text.length },
    };
    console.log(JSON.stringify(range));
    return { message, range };
  }
  return undefined;
};

export const tokensToString = (tokens: Token[]): string => tokens.map((tkn: Token) => tkn.text).join("");
titles.ts
/** * @beta */ export interface TitleBuilder { joinTitles(parentTitle: string, childTitle: string): string; buildTitle(rootTitle: string, routeTitles: string[][]): string; } /** * @beta */ export class
implements TitleBuilder { public constructor( private segmentSeparator = " - ", private fragmentSeparator = ":" ) {} public joinTitles(parentTitle: string, childTitle: string): string { return parentTitle === "" ? childTitle : childTitle === "" ? parentTitle : `${parentTitle}${this.segmentSeparator}${childTitle}`; } public buildTitle(rootTitle: string, routeTitles: string[][]) { let title = rootTitle; for (const level of routeTitles) { if (title) { title = title + this.segmentSeparator; } let segment = ""; for (const fragment of level) { if (segment) { segment = segment + this.fragmentSeparator; } segment = segment + fragment; } title = title + segment; } return title; } }
DefaultTitleBuilder
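The builder above joins route levels with `segmentSeparator` (default " - ") and fragments within a level with `fragmentSeparator` (default ":"). A minimal Python sketch of the same `buildTitle` semantics, with made-up titles:

# Sketch of DefaultTitleBuilder.buildTitle with the default separators.
def build_title(root_title, route_titles, seg=" - ", frag=":"):
    title = root_title
    for level in route_titles:
        if title:
            title += seg
        title += frag.join(level)
    return title

assert build_title("App", [["Users"], ["Profile", "Edit"]]) == "App - Users - Profile:Edit"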
main.rs
use std::{ collections::{HashMap, HashSet}, error::Error, io::Read, iter::FromIterator, };
acc += count(graph, x); acc }) }) } fn get_graph(input: &str) -> HashMap<String, Vec<String>> { input .lines() .map(|line| { let sep = line.find(')').unwrap(); (line[0..sep].to_string(), line[sep + 1..].to_string()) }) .fold( HashMap::new(), |mut map: HashMap<String, Vec<String>>, (a, b)| { map.entry(b) .and_modify(|v| v.push(a.clone())) .or_insert_with(|| vec![a]); map }, ) } fn part_one(input: &str) -> u64 { let graph = get_graph(input); graph.keys().fold(0, |mut acc, x| { acc += count(&graph, x); acc }) } fn path_to_center(graph: &HashMap<String, Vec<String>>, node: &str) -> Vec<String> { std::iter::successors(Some(node.to_owned()), |x| { if let Some(y) = graph.get(x) { y.get(0).cloned() } else { None } }) .collect() } fn part_two(input: &str) -> usize { let graph = get_graph(input); let you: HashSet<String> = HashSet::from_iter(path_to_center(&graph, "YOU").into_iter()); let san = HashSet::from_iter(path_to_center(&graph, "SAN").into_iter()); you.difference(&san).count() + san.difference(&you).count() - 2 } fn main() -> Result<(), Box<dyn Error>> { let mut input = String::new(); std::io::stdin().read_to_string(&mut input)?; println!("part_one: {}", part_one(&input)); println!("part_two: {}", part_two(&input)); Ok(()) } #[cfg(test)] mod tests { use super::*; #[test] fn part_one_examples() { assert_eq!( part_one( r#"COM)B B)C C)D D)E E)F B)G G)H D)I E)J J)K K)L"# ), 42 ) } #[test] fn part_two_examples() { assert_eq!( part_two( r#"COM)B B)C C)D D)E E)F B)G G)H D)I E)J J)K K)L K)YOU I)SAN"# ), 4 ) } }
fn count(graph: &HashMap<String, Vec<String>>, node: &str) -> u64 { graph.get(node).map_or(0, |values| { values.iter().fold(1, |mut acc, x| {
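`part_two` works by walking each node's ancestor chain back to COM and counting the symmetric difference of the two chains, minus the two endpoints themselves. A cross-language sketch of the same set-difference idea (Python, to keep all added examples in one language), run on the example graph from the tests:

# Graph maps child -> parent, as built by get_graph above.
def path_to_center(parent, node):
    path = [node]
    while path[-1] in parent:
        path.append(parent[path[-1]])
    return path

parent = {"B": "COM", "C": "B", "D": "C", "E": "D", "F": "E", "G": "B",
          "H": "G", "I": "D", "J": "E", "K": "J", "L": "K",
          "YOU": "K", "SAN": "I"}
you = set(path_to_center(parent, "YOU"))
san = set(path_to_center(parent, "SAN"))
# Drop YOU and SAN themselves (the Rust code's "- 2"), leaving 4 transfers.
assert len(you ^ san) - 2 == 4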
sensor.py
"""Platform for sensor integration.""" from __future__ import annotations import homeassistant.helpers.config_validation as cv import requests import voluptuous as vol from homeassistant.components.sensor import SensorEntity, PLATFORM_SCHEMA, SensorStateClass, SensorDeviceClass from homeassistant.const import CONF_USERNAME, CONF_PASSWORD, CONF_API_TOKEN from homeassistant.core import HomeAssistant from homeassistant.helpers.entity_platform import AddEntitiesCallback from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType from requests.auth import HTTPBasicAuth PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_USERNAME): cv.string, vol.Required(CONF_PASSWORD): cv.string, vol.Required(CONF_API_TOKEN): cv.string, }) def setup_platform( hass: HomeAssistant, config: ConfigType, add_entities: AddEntitiesCallback, discovery_info: DiscoveryInfoType | None = None ) -> None: """Set up the sensor platform.""" url = "https://secure.kontomierz.pl/k4/user_accounts.json?api_key=" + config.get(CONF_API_TOKEN) payload = {} headers = { 'Content-Type': 'application/json', 'Accept': 'application/json', } response = requests.get(url, auth=HTTPBasicAuth(config.get(CONF_USERNAME), config.get(CONF_PASSWORD)), headers=headers, data=payload) response_json = response.json() for x in response_json: account = x.get('user_account') add_entities( [KontomierzSensor(hass, config, account.get('bank_name') + " - " + account.get('display_name'), account.get('iban'))]) class KontomierzSensor(SensorEntity): """Representation of a Sensor.""" def __init__(self, hass, config: dict, entity_name: string, iban: string) -> None: self._attr_device_class = SensorDeviceClass.MONETARY self._attr_state_class = SensorStateClass.MEASUREMENT self._state = None self.hass = hass self.username = config.get(CONF_USERNAME) self.password = config.get(CONF_PASSWORD) self.apiToken = config.get(CONF_API_TOKEN) self.entity_name = entity_name self.iban = iban @property def unique_id(self) -> str | None: return "kontomierz_sensor" + self.entity_name @property def name(self) -> str: return self.entity_name @property def state(self): """Return the state of the sensor.""" return self._state def
(self) -> None: """Fetch new state data for the sensor. This is the only method that should fetch new data for Home Assistant. """ url = "https://secure.kontomierz.pl/k4/user_accounts.json?api_key=" + self.apiToken response = requests.get(url, auth=HTTPBasicAuth(self.username, self.password), headers={ 'Content-Type': 'application/json', 'Accept': 'application/json', }, data={}) response_json = response.json() result = 0.0 for x in response_json: user_account = x.get('user_account') if self.iban == user_account.get('iban'): result = float(user_account.get('balance')) self._attr_native_unit_of_measurement = user_account.get('currency_name') self._state = result
update
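The `update` method above scans the `user_accounts` payload for the entry whose `iban` matches and pulls its `balance` and `currency_name`. A standalone Python sketch of that lookup against a mocked payload; the field names mirror the sensor code, while the IBANs and values are invented:

# Mocked payload in the shape returned by user_accounts.json.
mock_response = [
    {"user_account": {"iban": "PL00000000000000000000000001",
                      "balance": "1234.56", "currency_name": "PLN"}},
    {"user_account": {"iban": "PL00000000000000000000000002",
                      "balance": "78.90", "currency_name": "PLN"}},
]

def balance_for(response_json, iban):
    """Replicates the matching loop in KontomierzSensor.update()."""
    for x in response_json:
        account = x.get("user_account")
        if account and account.get("iban") == iban:
            return float(account["balance"]), account["currency_name"]
    return 0.0, None

assert balance_for(mock_response, "PL00000000000000000000000002") == (78.9, "PLN")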
font.ts
import parseFontFamily from './fontFamily' import { regExpToken, SPACE, LENGTH, UNSUPPORTED_LENGTH_UNIT, SLASH, } from '../tokenTypes' const NORMAL = regExpToken(/^(normal)$/) const STYLE = regExpToken(/^(italic)$/) const WEIGHT = regExpToken(/^([1-9]00|bold)$/) const VARIANT = regExpToken(/^(small-caps)$/) const defaultFontStyle = 'normal' const defaultFontWeight = 'normal' const defaultFontVariant = [] export default tokenStream => { let fontStyle let fontWeight let fontVariant // let fontSize; let lineHeight // let fontFamily;
/* pass */ } else if (fontStyle === undefined && tokenStream.matches(STYLE)) { fontStyle = tokenStream.lastValue } else if (fontWeight === undefined && tokenStream.matches(WEIGHT)) { fontWeight = tokenStream.lastValue } else if (fontVariant === undefined && tokenStream.matches(VARIANT)) { fontVariant = [tokenStream.lastValue] } else { break } tokenStream.expect(SPACE) numStyleWeightVariantMatched += 1 } const fontSize = tokenStream.expect(LENGTH, UNSUPPORTED_LENGTH_UNIT) if (tokenStream.matches(SLASH)) { lineHeight = tokenStream.expect(LENGTH, UNSUPPORTED_LENGTH_UNIT) } tokenStream.expect(SPACE) const { fontFamily } = parseFontFamily(tokenStream) if (fontStyle === undefined) fontStyle = defaultFontStyle if (fontWeight === undefined) fontWeight = defaultFontWeight if (fontVariant === undefined) fontVariant = defaultFontVariant const out:any = { fontStyle, fontWeight, fontVariant, fontSize, fontFamily } if (lineHeight !== undefined) out.lineHeight = lineHeight return out }
let numStyleWeightVariantMatched = 0 while (numStyleWeightVariantMatched < 3 && tokenStream.hasTokens()) { if (tokenStream.matches(NORMAL)) {
holiday_parser_config.py
from typing import List, Dict, Callable from datetime import datetime from recognizers_text.utilities import RegExpUtility from ..utilities import DateUtils from ..base_holiday import BaseHolidayParserConfiguration from ...resources.french_date_time import FrenchDateTime class FrenchHolidayParserConfiguration(BaseHolidayParserConfiguration): @property def holiday_names(self) -> Dict[str, List[str]]: return self._holiday_names @property def holiday_regex_list(self) -> List[str]: return self._holiday_regexes @property def holiday_func_dictionary(self) -> Dict[str, Callable[[int], datetime]]: return self._holiday_func_dictionary def __init__(self, config): super().__init__() self._holiday_regexes = [ RegExpUtility.get_safe_reg_exp(FrenchDateTime.HolidayRegex1), RegExpUtility.get_safe_reg_exp(FrenchDateTime.HolidayRegex2), RegExpUtility.get_safe_reg_exp(FrenchDateTime.HolidayRegex3), RegExpUtility.get_safe_reg_exp(FrenchDateTime.HolidayRegex4) ] self._holiday_names = FrenchDateTime.HolidayNames #self._variable_holidays_timex_dictionary = FrenchDateTime.VariableHolidaysTimexDictionary def _init_holiday_funcs(self) -> Dict[str, Callable[[int], datetime]]: local = dict([ ('maosbirthday', FrenchHolidayParserConfiguration.mao_birthday), ('yuandan', FrenchHolidayParserConfiguration.new_year), ('teachersday', FrenchHolidayParserConfiguration.teacher_day), ('singleday', FrenchHolidayParserConfiguration.singles_day), ('allsaintsday', FrenchHolidayParserConfiguration.halloween_day), ('youthday', FrenchHolidayParserConfiguration.youth_day), ('childrenday', FrenchHolidayParserConfiguration.children_day), ('femaleday', FrenchHolidayParserConfiguration.female_day), ('treeplantingday', FrenchHolidayParserConfiguration.tree_plant_day), ('arborday', FrenchHolidayParserConfiguration.tree_plant_day), ('girlsday', FrenchHolidayParserConfiguration.girls_day), ('whiteloverday', FrenchHolidayParserConfiguration.white_lover_day), ('loverday', FrenchHolidayParserConfiguration.valentines_day), ('christmas', FrenchHolidayParserConfiguration.christmas_day), ('xmas', FrenchHolidayParserConfiguration.christmas_day), ('newyear', FrenchHolidayParserConfiguration.new_year), ('newyearday', FrenchHolidayParserConfiguration.new_year), ('newyearsday', FrenchHolidayParserConfiguration.new_year), ('inaugurationday', FrenchHolidayParserConfiguration.inauguration_day), ('groundhougday', FrenchHolidayParserConfiguration.groundhog_day), ('valentinesday', FrenchHolidayParserConfiguration.valentines_day),
('stpatrickday', FrenchHolidayParserConfiguration.st_patrick_day), ('aprilfools', FrenchHolidayParserConfiguration.fool_day), ('stgeorgeday', FrenchHolidayParserConfiguration.st_george_day), ('mayday', FrenchHolidayParserConfiguration.mayday), ('cincodemayoday', FrenchHolidayParserConfiguration.cinco_de_mayo_day), ('baptisteday', FrenchHolidayParserConfiguration.baptiste_day), ('usindependenceday', FrenchHolidayParserConfiguration.usa_independence_day), ('independenceday', FrenchHolidayParserConfiguration.usa_independence_day), ('bastilleday', FrenchHolidayParserConfiguration.bastille_day), ('halloweenday', FrenchHolidayParserConfiguration.halloween_day), ('allhallowday', FrenchHolidayParserConfiguration.all_hallow_day), ('allsoulsday', FrenchHolidayParserConfiguration.all_souls_day), ('guyfawkesday', FrenchHolidayParserConfiguration.guyfawkes_day), ('veteransday', FrenchHolidayParserConfiguration.veterans_day), ('christmaseve', FrenchHolidayParserConfiguration.christmas_eve), ('newyeareve', FrenchHolidayParserConfiguration.new_year_eve), ('fathersday', FrenchHolidayParserConfiguration.fathers_day), ('mothersday', FrenchHolidayParserConfiguration.mothers_day), ('labourday', FrenchHolidayParserConfiguration.labour_day) ]) return {**super()._init_holiday_funcs(), **local} @staticmethod def new_year(year: int) -> datetime: return datetime(year, 1, 1) @staticmethod def new_year_eve(year: int) -> datetime: return datetime(year, 12, 31) @staticmethod def christmas_day(year: int) -> datetime: return datetime(year, 12, 25) @staticmethod def christmas_eve(year: int) -> datetime: return datetime(year, 12, 24) @staticmethod def female_day(year: int) -> datetime: return datetime(year, 3, 8) @staticmethod def children_day(year: int) -> datetime: return datetime(year, 6, 1) @staticmethod def halloween_day(year: int) -> datetime: return datetime(year, 10, 31) @staticmethod def easter_day(year: int) -> datetime: return DateUtils.min_value @staticmethod def valentines_day(year: int) -> datetime: return datetime(year, 2, 14) @staticmethod def white_lover_day(year: int) -> datetime: return datetime(year, 3, 14) @staticmethod def fool_day(year: int) -> datetime: return datetime(year, 4, 1) @staticmethod def girls_day(year: int) -> datetime: return datetime(year, 3, 7) @staticmethod def tree_plant_day(year: int) -> datetime: return datetime(year, 3, 12) @staticmethod def youth_day(year: int) -> datetime: return datetime(year, 5, 4) @staticmethod def teacher_day(year: int) -> datetime: return datetime(year, 9, 10) @staticmethod def singles_day(year: int) -> datetime: return datetime(year, 11, 11) @staticmethod def mao_birthday(year: int) -> datetime: return datetime(year, 12, 26) @staticmethod def inauguration_day(year: int) -> datetime: return datetime(year, 1, 20) @staticmethod def groundhog_day(year: int) -> datetime: return datetime(year, 2, 2) @staticmethod def st_patrick_day(year: int) -> datetime: return datetime(year, 3, 17) @staticmethod def st_george_day(year: int) -> datetime: return datetime(year, 4, 23) @staticmethod def mayday(year: int) -> datetime: return datetime(year, 5, 1) @staticmethod def cinco_de_mayo_day(year: int) -> datetime: return datetime(year, 5, 5) @staticmethod def baptiste_day(year: int) -> datetime: return datetime(year, 6, 24) @staticmethod def usa_independence_day(year: int) -> datetime: return datetime(year, 7, 4) @staticmethod def bastille_day(year: int) -> datetime: return datetime(year, 7, 14) @staticmethod def all_hallow_day(year: int) -> datetime: return datetime(year, 
11, 1) @staticmethod def all_souls_day(year: int) -> datetime: return datetime(year, 11, 2) @staticmethod def guyfawkes_day(year: int) -> datetime: return datetime(year, 11, 5) @staticmethod def veterans_day(year: int) -> datetime: return datetime(year, 11, 11) @staticmethod def fathers_day(year: int) -> datetime: return datetime(year, 6, 17) @staticmethod def mothers_day(year: int) -> datetime: return datetime(year, 5, 27) @staticmethod def labour_day(year: int) -> datetime: return datetime(year, 5, 1) def get_swift_year(self, text: str) -> int: trimmed_text = text.strip().lower() swift = -10 if trimmed_text.endswith('prochain'): # next - 'l'annee prochain' swift = 1 if trimmed_text.endswith('dernier'): # last - 'l'annee dernier' swift = -1 if trimmed_text.startswith('cette'): # this - 'cette annees' swift = 0 return swift def sanitize_holiday_token(self, holiday: str) -> str: return holiday.replace(' ', '').replace('\'', '')
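`get_swift_year` maps French phrase cues to a year offset, with -10 acting as the "no year shift recognized" sentinel. A standalone sketch of the same cue handling with a few worked inputs:

# Sketch of the suffix/prefix checks performed by get_swift_year above.
def get_swift_year(text: str) -> int:
    trimmed = text.strip().lower()
    if trimmed.endswith('prochain'):   # next:  "l'annee prochain"
        return 1
    if trimmed.endswith('dernier'):    # last:  "l'annee dernier"
        return -1
    if trimmed.startswith('cette'):    # this:  "cette annee"
        return 0
    return -10

assert get_swift_year("l'annee prochain") == 1
assert get_swift_year("l'annee dernier") == -1
assert get_swift_year("cette annee") == 0
assert get_swift_year("Noel") == -10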
foo.rs
fn
() { }
main
array_express.py
import requests

from django.utils.dateparse import parse_datetime
from typing import List, Dict

from data_refinery_common.job_lookup import ProcessorPipeline, Downloaders
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import (
    Experiment,
    ExperimentAnnotation,
    ExperimentOrganismAssociation,
    ExperimentSampleAssociation,
    Organism,
    OriginalFile,
    OriginalFileSampleAssociation,
    Sample,
    SampleAnnotation,
    SurveyJobKeyValue,
)
from data_refinery_common.utils import (
    get_normalized_platform,
    get_readable_affymetrix_names,
    get_supported_microarray_platforms,
)
from data_refinery_foreman.surveyor import harmony, utils
from data_refinery_foreman.surveyor.external_source import ExternalSourceSurveyor

logger = get_and_configure_logger(__name__)

EXPERIMENTS_URL = "https://www.ebi.ac.uk/arrayexpress/json/v3/experiments/"
SAMPLES_URL = EXPERIMENTS_URL + "{}/samples"
UNKNOWN = "UNKNOWN"


class UnsupportedPlatformException(Exception):
    pass


class ArrayExpressSurveyor(ExternalSourceSurveyor):
    def source_type(self):
        return Downloaders.ARRAY_EXPRESS.value

    @staticmethod
    def _get_last_update_date(parsed_json: Dict) -> str:
        if "lastupdatedate" in parsed_json:
            return parsed_json["lastupdatedate"]
        else:
            return parsed_json["releasedate"]

    @classmethod
    def _apply_metadata_to_experiment(cls, experiment_object: Experiment, parsed_json: Dict):
        # We aren't sure these fields will be populated, or how many there will be.
        # Try to join them all together, or set a sensible default.
        experiment_description = ""
        if "description" in parsed_json and len(parsed_json["description"]) > 0:
            for description_item in parsed_json["description"]:
                if "text" in description_item:
                    experiment_description = experiment_description + description_item["text"] + "\n"

        if experiment_description == "":
            experiment_description = "Description not available.\n"

        experiment_object.source_database = "ARRAY_EXPRESS"
        experiment_object.title = parsed_json["name"]
        # This will need to be updated if we ever use Array
        # Express to get other kinds of data.
        experiment_object.technology = "MICROARRAY"
        experiment_object.description = experiment_description
        experiment_object.source_first_published = parse_datetime(parsed_json["releasedate"])
        experiment_object.source_last_modified \
            = parse_datetime(cls._get_last_update_date(parsed_json))

    def create_experiment_from_api(self, experiment_accession_code: str) -> (Experiment, Dict):
        """Given an experiment accession code, create an Experiment object.

        Also returns a dictionary of additional information about the
        platform discovered for the experiment.

        Will raise an UnsupportedPlatformException if this experiment was
        conducted using a platform which we don't support.

        See an example at: https://www.ebi.ac.uk/arrayexpress/json/v3/experiments/E-MTAB-3050/sample
        """
        request_url = EXPERIMENTS_URL + experiment_accession_code
        experiment_request = utils.requests_retry_session().get(request_url, timeout=60)

        try:
            parsed_json = experiment_request.json()["experiments"]["experiment"][0]
        except KeyError:
            logger.error("Remote experiment has no Experiment data!",
                         experiment_accession_code=experiment_accession_code,
                         survey_job=self.survey_job.id)
            raise

        experiment = {}
        experiment["name"] = parsed_json["name"]
        experiment["experiment_accession_code"] = experiment_accession_code

        # This experiment has no platform at all, and is therefore useless.
if 'arraydesign' not in parsed_json or len(parsed_json["arraydesign"]) == 0: logger.warn("Remote experiment has no arraydesign listed.", experiment_accession_code=experiment_accession_code, survey_job=self.survey_job.id) raise UnsupportedPlatformException # If there is more than one arraydesign listed in the experiment # then there is no other way to determine which array was used # for which sample other than looking at the header of the CEL # file. That obviously cannot happen until the CEL file has been # downloaded so we can just mark it as UNKNOWN and let the # downloader inspect the downloaded file to determine the # array then. elif len(parsed_json["arraydesign"]) != 1 or "accession" not in parsed_json["arraydesign"][0]: experiment["platform_accession_code"] = UNKNOWN experiment["platform_accession_name"] = UNKNOWN experiment["manufacturer"] = UNKNOWN else: external_accession = parsed_json["arraydesign"][0]["accession"] for platform in get_supported_microarray_platforms(): if platform["external_accession"] == external_accession: experiment["platform_accession_code"] = get_normalized_platform(platform["platform_accession"]) # Illumina appears in the accession codes for # platforms manufactured by Illumina if "ILLUMINA" in experiment["platform_accession_code"].upper(): experiment["manufacturer"] = "ILLUMINA" experiment["platform_accession_name"] = platform["platform_accession"] else: # It's not Illumina, the only other supported Microarray platform is # Affy. As our list of supported platforms grows this logic will # need to get more sophisticated. experiment["manufacturer"] = "AFFYMETRIX" platform_mapping = get_readable_affymetrix_names() experiment["platform_accession_name"] = platform_mapping[ platform["platform_accession"]] if "platform_accession_code" not in experiment: # We don't know what platform this accession corresponds to. 
experiment["platform_accession_code"] = external_accession experiment["platform_accession_name"] = UNKNOWN experiment["manufacturer"] = UNKNOWN experiment["release_date"] = parsed_json["releasedate"] experiment["last_update_date"] = self._get_last_update_date(parsed_json) # Create the experiment object try: experiment_object = Experiment.objects.get(accession_code=experiment_accession_code) logger.debug("Experiment already exists, skipping object creation.", experiment_accession_code=experiment_accession_code, survey_job=self.survey_job.id) except Experiment.DoesNotExist: experiment_object = Experiment() experiment_object.accession_code = experiment_accession_code experiment_object.source_url = request_url ArrayExpressSurveyor._apply_metadata_to_experiment(experiment_object, parsed_json) experiment_object.save() json_xa = ExperimentAnnotation() json_xa.experiment = experiment_object json_xa.data = parsed_json json_xa.is_ccdl = False json_xa.save() # Fetch and parse the IDF/SDRF file for any other fields IDF_URL_TEMPLATE = "https://www.ebi.ac.uk/arrayexpress/files/{code}/{code}.idf.txt" idf_url = IDF_URL_TEMPLATE.format(code=experiment_accession_code) idf_text = utils.requests_retry_session().get(idf_url, timeout=60).text lines = idf_text.split('\n') idf_dict = {} for line in lines: keyval = line.strip().split('\t') if len(keyval) == 2: idf_dict[keyval[0]] = keyval[1] elif len(keyval) > 2: idf_dict[keyval[0]] = keyval[1:] idf_xa = ExperimentAnnotation() idf_xa.data = idf_dict idf_xa.experiment = experiment_object idf_xa.is_ccdl = False idf_xa.save() if 'Investigation Title' in idf_dict and isinstance(idf_dict['Investigation Title'], str): experiment_object.title = idf_dict['Investigation Title'] if 'Person Affiliation' in idf_dict: # This is very rare, ex: E-MEXP-32 if isinstance(idf_dict['Person Affiliation'], list): unique_people = list(set(idf_dict['Person Affiliation'])) experiment_object.submitter_institution = ", ".join(unique_people)[:255] else: experiment_object.submitter_institution = idf_dict['Person Affiliation'] # Get protocol_description from "<experiment_url>/protocols" # instead of from idf_dict, because the former provides more # details. protocol_url = request_url + '/protocols' protocol_request = utils.requests_retry_session().get(protocol_url, timeout=60) try: experiment_object.protocol_description = protocol_request.json()['protocols'] except KeyError: logger.warning("Remote experiment has no protocol data!", experiment_accession_code=experiment_accession_code, survey_job=self.survey_job.id) if 'Publication Title' in idf_dict: # This will happen for some superseries. # Ex: E-GEOD-29536 # Assume most recent is "best:, store the rest in experiment annotation. 
if isinstance(idf_dict['Publication Title'], list): experiment_object.publication_title = "; ".join(idf_dict['Publication Title']) else: experiment_object.publication_title = idf_dict['Publication Title'] experiment_object.has_publication = True if 'Publication DOI' in idf_dict: if isinstance(idf_dict['Publication DOI'], list): experiment_object.publication_doi = ", ".join(idf_dict['Publication DOI']) else: experiment_object.publication_doi = idf_dict['Publication DOI'] experiment_object.has_publication = True if 'PubMed ID' in idf_dict: if isinstance(idf_dict['PubMed ID'], list): experiment_object.pubmed_id = ", ".join(idf_dict['PubMed ID']) else: experiment_object.pubmed_id = idf_dict['PubMed ID'] experiment_object.has_publication = True # Scrape publication title and authorship from Pubmed if experiment_object.pubmed_id: pubmed_metadata = utils.get_title_and_authors_for_pubmed_id(experiment_object.pubmed_id) experiment_object.publication_title = pubmed_metadata[0] experiment_object.publication_authors = pubmed_metadata[1] experiment_object.save() platform_dict = {} for k in ('platform_accession_code', 'platform_accession_name', 'manufacturer'): platform_dict[k] = experiment[k] return experiment_object, platform_dict def determine_sample_accession(self, experiment_accession: str, sample_source_name: str, sample_assay_name: str, filename: str) -> str: """Determine what to use as the sample's accession code. This is a complicated heuristic to determine the sample accession because there isn't a field that consistently contains it so we're trying to figure out a heuristic that will work for all the data. This may need even further refinements if we come across examples that break it. However, what's going on is that we think either the `source` or `assay` field will be the sample accession but it's not always the same. Ex: E-MEXP-669 has it in sample_assay_name. Therefore we try a few different things to determine which it is. The experiment accession must be prefixed since accessions are non-unique on AE, ex "Sample 1" is a valid assay name. """ # It SEEMS like the filename often contains part or all of the # sample name so we first try to see if either field contains # the filename with the extension stripped off: if isinstance(filename, str): stripped_filename = ".".join(filename.split(".")[:-1]) if stripped_filename != "": if stripped_filename in sample_source_name: return experiment_accession + "-" + sample_source_name elif stripped_filename in sample_assay_name: return experiment_accession + "-" + sample_assay_name # Accessions don't have spaces in them, but sometimes these # fields do so next we try to see if one has spaces and the # other doesn't: source_has_spaces = " " in sample_source_name assay_has_spaces = " " in sample_assay_name if assay_has_spaces and not source_has_spaces: return experiment_accession + "-" + sample_source_name elif source_has_spaces and not assay_has_spaces: return experiment_accession + "-" + sample_assay_name # We're out of options so return the longest one. if len(sample_source_name) >= len(sample_assay_name): return experiment_accession + "-" + sample_source_name else: return experiment_accession + "-" + sample_assay_name @staticmethod def extract_protocol_text(protocol_text): """Returns a string representation of protocol_text. 
protocol_text may be a string or a list containing both
        strings and dicts, like so (it's what the API returns sometimes,
        see E-MEXP-2381 as an example):

        [
            "Microarrays were imaged using an Agilent microarray scanner in XDR (eXtended Dynamic Range function) mode and a scan resolution of 5 \u00b5m.",
            {
                "br": null
            },
            "(Parameters: Scanning hardware = DNA Microarray Scanner BA [Agilent Technologies], Scanning software = Feature Extraction Software [Agilent])"
        ]
        """
        if not protocol_text:
            return ''
        elif type(protocol_text) == str:
            return protocol_text.strip()
        elif type(protocol_text) == list:
            # These can be {"br": None}, so skip non string lines
            return " ".join([line.strip() for line in protocol_text if type(line) == str])
        else:
            # Not sure what would get us here, but it's not worth raising an error over
            return str(protocol_text)

    @staticmethod
    def update_sample_protocol_info(existing_protocols, experiment_protocol, protocol_url):
        """Compares experiment_protocol with a sample's existing_protocols and
        updates the latter if the former includes any new entry.

        Returns a two-element tuple, the first is existing_protocols (which
        may or may not have been updated) and the second is a bool indicating
        whether existing_protocols has been updated.

        Note that the ArrayExpress experiment-level protocol may include
        multiple protocol entries.
        """
        if 'protocol' not in experiment_protocol:
            return (existing_protocols, False)

        is_updated = False
        # Compare each entry in experiment protocol with the existing
        # protocols; if the entry is new, add it to existing_protocols.
        for new_protocol in experiment_protocol['protocol']:
            new_protocol_text = new_protocol.get('text', '')
            new_protocol_text = ArrayExpressSurveyor.extract_protocol_text(new_protocol_text)

            # Ignore experiment-level protocols whose accession or text
            # field is unavailable or empty.
            if (not new_protocol.get('accession', '').strip()
                    or not new_protocol_text):
                continue

            new_protocol_is_found = False
            for existing_protocol in existing_protocols:
                if (new_protocol.get('accession', '') == existing_protocol['Accession']
                        and new_protocol_text == existing_protocol['Text']
                        and new_protocol.get('type', '') == existing_protocol['Type']):
                    new_protocol_is_found = True
                    break
            if not new_protocol_is_found:
                existing_protocols.append({
                    'Accession': new_protocol['accession'],
                    'Text': new_protocol_text,
                    'Type': new_protocol.get('type', ''),  # in case 'type' field is unavailable
                    'Reference': protocol_url
                })
                is_updated = True
        return (existing_protocols, is_updated)

    @staticmethod
    def _apply_harmonized_metadata_to_sample(sample: Sample, harmonized_metadata: dict):
        """Applies the harmonized metadata to `sample`"""
        for key, value in harmonized_metadata.items():
            setattr(sample, key, value)

    def create_samples_from_api(self,
                                experiment: Experiment,
                                platform_dict: Dict
                                ) -> List[Sample]:
        """Generates a Sample item for each sample in an AE experiment.

        There are many possible data situations for a sample:

            - If the sample only has raw data available:
                - If it is on a platform that we support:
                    Download this raw data and process it
                - If it is not on a platform we support:
                    Don't download anything, don't process anything
            - If the sample has both raw and derived data:
                - If the raw data is on a platform we support:
                    Download the raw data and process it, abandon the derived data
                - If the raw data is not on a platform we support
                    Download the derived data and no-op it, abandon the raw data
            - If the sample only has derived data:
                Download the derived data and no-op it.
See an example at: https://www.ebi.ac.uk/arrayexpress/json/v3/experiments/E-MTAB-3050/samples
        """
        created_samples = []

        samples_endpoint = SAMPLES_URL.format(experiment.accession_code)
        r = utils.requests_retry_session().get(samples_endpoint, timeout=60)
        samples = r.json()["experiment"]["sample"]

        # The SDRF is the complete metadata record on a sample/property basis.
        # We run this through our harmonizer and then attach the properties
        # to our created samples.
        SDRF_URL_TEMPLATE = "https://www.ebi.ac.uk/arrayexpress/files/{code}/{code}.sdrf.txt"
        sdrf_url = SDRF_URL_TEMPLATE.format(code=experiment.accession_code)
        sdrf_samples = harmony.parse_sdrf(sdrf_url)
        harmonized_samples = harmony.harmonize(sdrf_samples)

        # An experiment can have many samples
        for sample_data in samples:

            # For some reason, this sample has no files associated with it.
            if "file" not in sample_data or len(sample_data['file']) == 0:
                continue

            # Each sample is given an experimentally unique title.
            flat_sample = utils.flatten(sample_data)
            title = harmony.extract_title(flat_sample)

            # A sample may actually have many sub files.
            # If there is raw data, take that.
            # If not, take the derived.
            has_raw = False
            for sub_file in sample_data['file']:

                # For ex: E-GEOD-15645
                if isinstance(sub_file['comment'], list):
                    sub_file_mod = sub_file
                    sub_file_mod['comment'] = sub_file['comment'][0]
                else:
                    sub_file_mod = sub_file

                # Some have the 'data' field, but not the actual data
                # Ex: E-GEOD-9656
                if sub_file_mod['type'] == "data" and sub_file_mod['comment'].get('value', None) is not None:
                    has_raw = True

                # 'value' can be None, convert to an empty string to
                # make it easier to use.
                comment_value = sub_file_mod['comment'].get('value', '') or ''
                if 'raw' in comment_value:
                    has_raw = True

            skip_sample = False
            for sub_file in sample_data['file']:

                # Don't get the raw data if it's only a 1-color sample.
                if 'Cy3' in str(sample_data) and 'Cy5' not in str(sample_data):
                    has_raw = False

                # Skip derived data if we have it raw.
                if has_raw and "derived data" in sub_file['type']:
                    continue

                download_url = None
                filename = sub_file["name"]
# more than one comment...
                comments = sub_file["comment"]
                if isinstance(comments, list):
                    # Could be: "Derived ArrayExpress Data Matrix FTP
                    # file" or: "ArrayExpress FTP file". If there is
                    # no comment with a name including "FTP file" then
                    # we don't know where to download it so we need to
                    # mark this job as an error. Therefore don't catch
                    # the potential exception where download_url
                    # doesn't get defined.
                    for comment in comments:
                        if "FTP file" in comment["name"]:
                            download_url = comment["value"]
                            break
                else:
                    download_url = comments["value"]

                if not download_url:
                    # The sample accession code hasn't been determined yet
                    # at this point, so log the source name instead.
                    logger.error("Sample %s did not specify a download url, skipping.",
                                 sample_data["source"].get("name", ""),
                                 experiment_accession_code=experiment.accession_code,
                                 survey_job=self.survey_job.id,
                                 sub_file=sub_file)
                    skip_sample = True
                    continue

                if not filename:
                    logger.error("Sample %s did not specify a filename, skipping.",
                                 sample_data["source"].get("name", ""),
                                 experiment_accession_code=experiment.accession_code,
                                 survey_job=self.survey_job.id,
                                 sub_file=sub_file)
                    skip_sample = True
                    continue

            if skip_sample:
                continue

            # The accession code is not a simple matter to determine.
            sample_source_name = sample_data["source"].get("name", "")
            sample_assay_name = sample_data["assay"].get("name", "")
            sample_accession_code = self.determine_sample_accession(
                experiment.accession_code,
                sample_source_name,
                sample_assay_name,
                filename)

            # Figure out the Organism for this sample
            organism_name = UNKNOWN
            for characteristic in sample_data["characteristic"]:
                if characteristic["category"].upper() == "ORGANISM":
                    organism_name = characteristic["value"].upper()

            if organism_name == UNKNOWN:
                logger.error("Sample %s did not specify the organism name.",
                             sample_accession_code,
                             experiment_accession_code=experiment.accession_code,
                             survey_job=self.survey_job.id)
                organism = None
                continue
            else:
                organism = Organism.get_object_for_name(organism_name)

            # Create the sample object
            try:
                # Associate it with the experiment, but since it
                # already exists it already has original files
                # associated with it and it's already been downloaded,
                # so don't add it to created_samples.
                sample_object = Sample.objects.get(accession_code=sample_accession_code)

                # If input experiment includes new protocol information,
                # update sample's protocol_info.
                existing_protocols = sample_object.protocol_info
                protocol_info, is_updated = self.update_sample_protocol_info(
                    existing_protocols,
                    experiment.protocol_description,
                    experiment.source_url + '/protocols'
                )
                if is_updated:
                    sample_object.protocol_info = protocol_info
                    sample_object.save()

                logger.debug("Sample %s already exists, skipping object creation.",
                             sample_accession_code,
                             experiment_accession_code=experiment.accession_code,
                             survey_job=self.survey_job.id)
            except Sample.DoesNotExist:
                sample_object = Sample()

                # The basics
                sample_object.source_database = "ARRAY_EXPRESS"
                sample_object.title = title
                sample_object.accession_code = sample_accession_code
                sample_object.source_archive_url = samples_endpoint
                sample_object.organism = organism
                sample_object.platform_name = platform_dict["platform_accession_name"]
                sample_object.platform_accession_code = platform_dict["platform_accession_code"]
                sample_object.manufacturer = platform_dict["manufacturer"]
                sample_object.technology = "MICROARRAY"

                protocol_info, is_updated = self.update_sample_protocol_info(
                    existing_protocols=[],
                    experiment_protocol=experiment.protocol_description,
                    protocol_url=experiment.source_url + '/protocols'
                )
                # Do not check is_updated the first time because we must
                # save a list so we can append to it later.
sample_object.protocol_info = protocol_info sample_object.save() # Directly assign the harmonized properties harmonized_sample = harmonized_samples[title] ArrayExpressSurveyor._apply_harmonized_metadata_to_sample(sample_object, harmonized_sample) sample_annotation = SampleAnnotation() sample_annotation.data = sample_data sample_annotation.sample = sample_object sample_annotation.is_ccdl = False sample_annotation.save() original_file = OriginalFile() original_file.filename = filename original_file.source_filename = filename original_file.source_url = download_url original_file.is_downloaded = False original_file.is_archive = True original_file.has_raw = has_raw original_file.save() original_file_sample_association = OriginalFileSampleAssociation() original_file_sample_association.original_file = original_file original_file_sample_association.sample = sample_object original_file_sample_association.save() created_samples.append(sample_object) logger.debug("Created " + str(sample_object), experiment_accession_code=experiment.accession_code, survey_job=self.survey_job.id, sample=sample_object.id) # Create associations if they don't already exist ExperimentSampleAssociation.objects.get_or_create( experiment=experiment, sample=sample_object) ExperimentOrganismAssociation.objects.get_or_create( experiment=experiment, organism=organism) return created_samples def discover_experiment_and_samples(self) -> (Experiment, List[Sample]): experiment_accession_code = ( SurveyJobKeyValue .objects .get(survey_job_id=self.survey_job.id, key__exact="experiment_accession_code") .value ) logger.info("Surveying experiment with accession code: %s.", experiment_accession_code, survey_job=self.survey_job.id) try: experiment, platform_dict = self.create_experiment_from_api(experiment_accession_code) except UnsupportedPlatformException as e: logger.info("Experiment was not on a supported platform, skipping.", experiment_accession_code=experiment_accession_code, survey_job=self.survey_job.id) return None, [] except: logger.exception("Error occurred while surveying experiment!", experiment_accession_code=experiment_accession_code) return None, [] samples = self.create_samples_from_api(experiment, platform_dict) return experiment, samples
# sub_file["comment"] is only a list if there's
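The `determine_sample_accession` heuristic is easiest to follow on concrete inputs. Below is a standalone restatement of the same decision ladder with one worked example; the sample names are invented:

# Standalone sketch of the documented heuristic: filename match first,
# then the spaces check, then the longer of the two names.
def determine_sample_accession(experiment_accession, source_name, assay_name, filename):
    if isinstance(filename, str):
        stripped = ".".join(filename.split(".")[:-1])
        if stripped:
            if stripped in source_name:
                return experiment_accession + "-" + source_name
            if stripped in assay_name:
                return experiment_accession + "-" + assay_name

    source_has_spaces = " " in source_name
    assay_has_spaces = " " in assay_name
    if assay_has_spaces and not source_has_spaces:
        return experiment_accession + "-" + source_name
    if source_has_spaces and not assay_has_spaces:
        return experiment_accession + "-" + assay_name

    longer = source_name if len(source_name) >= len(assay_name) else assay_name
    return experiment_accession + "-" + longer

# The filename matches the assay field, so the assay name wins:
assert determine_sample_accession("E-MEXP-669", "Sample 1", "GSM1234",
                                  "GSM1234.CEL") == "E-MEXP-669-GSM1234"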
signals.py
from django.dispatch import receiver
from django.db.models.signals import post_save
from django.contrib.auth.models import User
from django.contrib.auth.signals import user_logged_out, user_logged_in
from .models import Profile


@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
    if created:
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
    instance.profile.save()


@receiver(user_logged_out)
def remove_online_status(request, **kwargs):
    p_pk = request.user.profile.pk
    online_status = Profile.objects.get(pk=p_pk)
    online_status.online = False
    online_status.save()


@receiver(user_logged_in)
def add_online_status(request, **kwargs):
    p_pk = request.user.profile.pk
    online_status = Profile.objects.get(pk=p_pk)
    online_status.online = True
    online_status.save()
Profile.objects.create(user=instance)
flickrgal.js
import React from "react"
import "js/flickrgal.min.js"
import "styles/flickrgal.scss"
import "styles/loading.scss"

class
extends React.Component { componentDidMount() { window.FlickrGal.init(); } render() { return <div id="flickrgal" data-collections='["mattfannin.nz"]' data-apikey='45e8b763369f3eaf10e31d694e70ccf4' data-userid='66303990@N07'></div>; } } export default FlickrGal
FlickrGal
checkpoint.py
# coding:utf-8
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
from typing import Tuple

import paddle

from paddlehub.compat.task import checkpoint_pb2
from paddlehub.utils.log import logger

CKPT_FILE_NAME = 'ckpt.meta'


def
(checkpoint_dir: str, exe: paddle.static.Executor, main_program: paddle.static.Program) -> Tuple[bool, int, int, float]: ckpt_meta_path = os.path.join(checkpoint_dir, CKPT_FILE_NAME) ckpt = checkpoint_pb2.CheckPoint() logger.info('Try loading checkpoint from {}'.format(ckpt_meta_path)) if os.path.exists(ckpt_meta_path): with open(ckpt_meta_path, 'rb') as f: ckpt.ParseFromString(f.read()) current_epoch = 1 global_step = 0 best_score = -999 if ckpt.latest_model_dir: paddle.static.load(executor=exe, model_path=ckpt.latest_model_dir, program=main_program) # Compatible with older versions without best_score in checkpoint_pb2 try: best_score = ckpt.best_score except: best_score = -999 logger.info('PaddleHub model checkpoint loaded. current_epoch={}, ' 'global_step={}, best_score={:.5f}'.format(ckpt.current_epoch, ckpt.global_step, best_score)) return True, ckpt.current_epoch, ckpt.global_step, best_score logger.info('PaddleHub model checkpoint not found, start from scratch...') return False, current_epoch, global_step, best_score
load_checkpoint
test_unit_layer_gather.py
import functools import operator import os import os.path import sys import numpy as np # Bamboo utilities current_file = os.path.realpath(__file__) current_dir = os.path.dirname(current_file) sys.path.insert(0, os.path.join(os.path.dirname(current_dir), 'common_python')) import tools # ============================================== # Objects for Python data reader # ============================================== # Note: The Python data reader imports this file as a module and calls # the functions below to ingest data. # Data input_size = 23 output_size = 15 seed = 202101280 # Sample access functions def get_sample(index): np.random.seed(seed+index) values = [np.random.normal() for _ in range(input_size)] indices = [ np.random.uniform(-1, input_size+1) for _ in range(output_size) ] return values + indices def num_samples(): return 25 def sample_dims(): return (input_size+output_size,) # ============================================== # Setup LBANN experiment # ============================================== def setup_experiment(lbann): """Construct LBANN experiment.
lbann (module): Module for LBANN Python frontend """ mini_batch_size = num_samples() // 2 trainer = lbann.Trainer(mini_batch_size) model = construct_model(lbann) data_reader = construct_data_reader(lbann) optimizer = lbann.NoOptimizer() return trainer, model, data_reader, optimizer def construct_model(lbann): """Construct LBANN model. Args: lbann (module): Module for LBANN Python frontend """ # Input data # Note: Sum with a weights layer so that gradient checking will # verify that error signals are correct. x = lbann.Identity(lbann.Input()) x_slice = lbann.Slice( x, slice_points=tools.str_list([0,input_size,input_size+output_size]), ) x0_weights = lbann.Weights( optimizer=lbann.SGD(), initializer=lbann.ConstantInitializer(value=0.0), name='input_weights', ) x0 = lbann.Sum( lbann.Identity(x_slice), lbann.WeightsLayer(weights=x0_weights, dims=tools.str_list(input_size)), ) x1 = lbann.Identity(x_slice) # Apply gather y0 = lbann.Gather(x0, x1) y1 = lbann.Concatenation([ lbann.Constant(value=i+1, num_neurons='1') for i in range(output_size) ]) y = lbann.Multiply(y0, y1) z = lbann.L2Norm2(y) # Objects for LBANN model layers = list(lbann.traverse_layer_graph(x)) metric = lbann.Metric(z, name='obj') obj = lbann.ObjectiveFunction(z) callbacks = [] # Compute expected metric value vals = [] for i in range(num_samples()): x = get_sample(i) x0 = x[:input_size] x1 = x[input_size:] y0 = np.zeros(output_size) for i in range(output_size): if 0 <= x1[i] < input_size: y0[i] = x0[int(x1[i])] z = 0 for i in range(output_size): z += ((i+1)*y0[i]) ** 2 vals.append(z) val = np.mean(vals) tol = 8 * val * np.finfo(np.float32).eps callbacks.append(lbann.CallbackCheckMetric( metric=metric.name, lower_bound=val-tol, upper_bound=val+tol, error_on_failure=True, execution_modes='test')) # Gradient checking callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True)) # Construct model num_epochs = 0 return lbann.Model(num_epochs, layers=layers, objective_function=obj, metrics=[metric], callbacks=callbacks) def construct_data_reader(lbann): """Construct Protobuf message for Python data reader. The Python data reader will import the current Python file to access the sample access functions. Args: lbann (module): Module for LBANN Python frontend """ # Note: The training data reader should be removed when # https://github.com/LLNL/lbann/issues/1098 is resolved. message = lbann.reader_pb2.DataReader() message.reader.extend([ tools.create_python_data_reader( lbann, current_file, 'get_sample', 'num_samples', 'sample_dims', 'train' ) ]) message.reader.extend([ tools.create_python_data_reader( lbann, current_file, 'get_sample', 'num_samples', 'sample_dims', 'test' ) ]) return message # ============================================== # Setup PyTest # ============================================== # Create test functions that can interact with PyTest for _test_func in tools.create_tests(setup_experiment, __file__): globals()[_test_func.__name__] = _test_func
    Args:
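For reference, the gather rule that the expected-metric loop in construct_model encodes (an out-of-range index contributes zero) can be checked standalone; the gather_1d helper below is illustrative only and is not part of LBANN or this test:

# Minimal NumPy sketch of the gather semantics assumed above.
import numpy as np

def gather_1d(values, indices):
    out = np.zeros(len(indices))
    for i, idx in enumerate(indices):
        if 0 <= idx < len(values):   # mirrors `0 <= x1[i] < input_size`
            out[i] = values[int(idx)]
    return out

print(gather_1d(np.array([10.0, 20.0, 30.0]), np.array([2.7, -0.5, 1.0])))
# -> [30.  0. 20.]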
SidebarAdjustStopLossEditingStage.tsx
import { Box } from '@theme-ui/components'
import { PickCloseState } from 'components/dumb/PickCloseState'
import { SliderValuePicker } from 'components/dumb/SliderValuePicker'
import { AppLink } from 'components/Links'
import { useTranslation } from 'next-i18next'
import React from 'react'
import { Text } from 'theme-ui'

import { AdjustSlFormLayoutProps, SetDownsideProtectionInformation } from '../AdjustSlFormLayout'

export function SidebarAdjustStopLossEditingStage({
  token,
  txError,
  slValuePickerConfig,
  closePickerConfig,
  tokenPrice,
  ethPrice,
  vault,
  ilkData,
  gasEstimation,
  selectedSLValue,
  isEditing,
  collateralizationRatioAtNextPrice,
  ethBalance,
  gasEstimationUsd,
}: AdjustSlFormLayoutProps) {
  const { t } = useTranslation()

  return (
    <>
      <Box mb={3}>
        <PickCloseState {...closePickerConfig} />
      </Box>
      <Text as="p" variant="paragraph3" sx={{ color: 'lavender' }}>
        {t('protection.set-downside-protection-desc')}{' '}
        <AppLink href="https://kb.oasis.app/help/stop-loss-protection" sx={{ fontSize: 2 }}>
          {t('here')}.
        </AppLink>
      </Text>
      <Box mt={3}>
        <SliderValuePicker {...slValuePickerConfig} />
      </Box>
      {isEditing && (
        <>
          <Box>
            <SetDownsideProtectionInformation
              token={token}
              vault={vault}
              ilkData={ilkData}
              gasEstimation={gasEstimation}
              gasEstimationUsd={gasEstimationUsd}
              afterStopLossRatio={selectedSLValue}
              tokenPrice={tokenPrice}
              ethPrice={ethPrice}
              isCollateralActive={closePickerConfig.isCollateralActive}
              collateralizationRatioAtNextPrice={collateralizationRatioAtNextPrice}
              selectedSLValue={selectedSLValue}
              ethBalance={ethBalance}
              txError={txError}
            />
          </Box>
          <Box sx={{ fontSize: 2 }}>
            <Text as="p" sx={{ mt: 3, fontWeight: 'semiBold' }}>
              {t('protection.not-guaranteed')}
            </Text>
            <Text as="p" sx={{ mb: 3 }}>
              {t('protection.guarantee-factors')}{' '}
              <AppLink
                href="https://kb.oasis.app/help/stop-loss-protection"
                sx={{ fontWeight: 'body' }}
              >
                {t('protection.learn-more-about-automation')}
              </AppLink>
            </Text>
          </Box>
        </>
      )}
    </>
  )
}
mod.rs
mod activity;
mod skills;
mod xup;

pub fn routes() -> Vec<rocket::Route>
{
    [skills::routes(), xup::routes(), activity::routes()].concat()
}
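The aggregation pattern above — each submodule exposes its own route list and the parent module concatenates them — sketched in Python with illustrative names (not the Rocket API):

def skills_routes():   return ["/skills"]
def xup_routes():      return ["/xup"]
def activity_routes(): return ["/activity"]

def routes():
    # Equivalent of [skills::routes(), xup::routes(), activity::routes()].concat()
    return skills_routes() + xup_routes() + activity_routes()

assert routes() == ["/skills", "/xup", "/activity"]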
state.rs
//! Utilities for game state management.

use amethyst_input::is_close_requested;

use crate::{ecs::prelude::World, GameData, StateEvent};

use std::fmt::Result as FmtResult;
use std::fmt::{Display, Formatter};

/// Error type for errors occurring in StateMachine
#[derive(Debug)]
pub enum StateError {
    NoStatesPresent,
}

impl Display for StateError {
    fn fmt(&self, fmt: &mut Formatter<'_>) -> FmtResult {
        match *self {
            StateError::NoStatesPresent => write!(
                fmt,
                "Tried to start state machine without any states present"
            ),
        }
    }
}

/// State data encapsulates the data sent to all state functions from the application main loop.
pub struct StateData<'a, T>
where
    T: 'a,
{
    /// Main `World`
    pub world: &'a mut World,
    /// User defined game data
    pub data: &'a mut T,
}

impl<'a, T> StateData<'a, T>
where
    T: 'a,
{
    /// Create a new state data
    pub fn new(world: &'a mut World, data: &'a mut T) -> Self {
        StateData { world, data }
    }
}

/// Types of state transitions.
/// T is the type of shared data between states.
/// E is the type of events
pub enum Trans<T, E> {
    /// Continue as normal.
    None,
    /// Remove the active state and resume the next state on the stack or stop
    /// if there are none.
    Pop,
    /// Pause the active state and push a new state onto the stack.
    Push(Box<dyn State<T, E>>),
    /// Remove the current state on the stack and insert a different one.
    Switch(Box<dyn State<T, E>>),
    /// Stop and remove all states and shut down the engine.
    Quit,
}

/// Event queue to trigger state `Trans` from other places than a `State`'s methods.
/// # Example:
/// ```rust, ignore
/// world.write_resource::<EventChannel<TransEvent<MyGameData, StateEvent>>>().single_write(Box::new(|| Trans::Quit));
/// ```
///
/// Transitions will be executed sequentially by Amethyst's `CoreApplication` update loop.
pub type TransEvent<T, E> = Box<dyn Fn() -> Trans<T, E> + Send + Sync + 'static>;

/// An empty `Trans`. Made to be used with `EmptyState`.
pub type EmptyTrans = Trans<(), StateEvent>;

/// A simple default `Trans`. Made to be used with `SimpleState`.
/// By default it contains a `GameData` as its `StateData` and doesn't have a custom event type.
pub type SimpleTrans<'a, 'b> = Trans<GameData<'a, 'b>, StateEvent>;

/// A trait which defines game states that can be used by the state machine.
pub trait State<T, E: Send + Sync + 'static> {
    /// Executed when the game state begins.
    fn on_start(&mut self, _data: StateData<'_, T>) {}

    /// Executed when the game state exits.
    fn on_stop(&mut self, _data: StateData<'_, T>) {}

    /// Executed when a different game state is pushed onto the stack.
    fn on_pause(&mut self, _data: StateData<'_, T>) {}

    /// Executed when the application returns to this game state once again.
    fn on_resume(&mut self, _data: StateData<'_, T>) {}

    /// Executed on every frame before updating, for use in reacting to events.
    fn handle_event(&mut self, _data: StateData<'_, T>, _event: E) -> Trans<T, E> {
        Trans::None
    }

    /// Executed repeatedly at stable, predictable intervals (1/60th of a second
    /// by default),
    /// if this is the active state.
    fn fixed_update(&mut self, _data: StateData<'_, T>) -> Trans<T, E> {
        Trans::None
    }

    /// Executed on every frame immediately, as fast as the engine will allow (taking into account the frame rate limit),
    /// if this is the active state.
    fn update(&mut self, _data: StateData<'_, T>) -> Trans<T, E> {
        Trans::None
    }

    /// Executed repeatedly at stable, predictable intervals (1/60th of a second
    /// by default),
    /// even when this is not the active state,
    /// as long as this state is on the [StateMachine](struct.StateMachine.html)'s state-stack.
    fn shadow_fixed_update(&mut self, _data: StateData<'_, T>) {}

    /// Executed on every frame immediately, as fast as the engine will allow (taking into account the frame rate limit),
    /// even when this is not the active state,
    /// as long as this state is on the [StateMachine](struct.StateMachine.html)'s state-stack.
    fn shadow_update(&mut self, _data: StateData<'_, T>) {}
}

/// An empty `State` trait. It contains no `StateData` or custom `StateEvent`.
pub trait EmptyState {
    /// Executed when the game state begins.
    fn on_start(&mut self, _data: StateData<'_, ()>) {}

    /// Executed when the game state exits.
    fn on_stop(&mut self, _data: StateData<'_, ()>) {}

    /// Executed when a different game state is pushed onto the stack.
    fn on_pause(&mut self, _data: StateData<'_, ()>) {}

    /// Executed when the application returns to this game state once again.
    fn on_resume(&mut self, _data: StateData<'_, ()>) {}

    /// Executed on every frame before updating, for use in reacting to events.
    fn handle_event(&mut self, _data: StateData<'_, ()>, event: StateEvent) -> EmptyTrans {
        if let StateEvent::Window(event) = &event {
            if is_close_requested(&event) {
                Trans::Quit
            } else {
                Trans::None
            }
        } else {
            Trans::None
        }
    }

    /// Executed repeatedly at stable, predictable intervals (1/60th of a second
    /// by default).
    fn fixed_update(&mut self, _data: StateData<'_, ()>) -> EmptyTrans {
        Trans::None
    }

    /// Executed on every frame immediately, as fast as the engine will allow (taking into account the frame rate limit).
    fn update(&mut self, _data: StateData<'_, ()>) -> EmptyTrans {
        Trans::None
    }

    /// Executed repeatedly at stable, predictable intervals (1/60th of a second
    /// by default),
    /// even when this is not the active state,
    /// as long as this state is on the [StateMachine](struct.StateMachine.html)'s state-stack.
    fn shadow_fixed_update(&mut self, _data: StateData<'_, ()>) {}

    /// Executed on every frame immediately, as fast as the engine will allow (taking into account the frame rate limit),
    /// even when this is not the active state,
    /// as long as this state is on the [StateMachine](struct.StateMachine.html)'s state-stack.
    fn shadow_update(&mut self, _data: StateData<'_, ()>) {}
}

impl<T: EmptyState> State<(), StateEvent> for T {
    /// Executed when the game state begins.
    fn on_start(&mut self, data: StateData<'_, ()>) {
        self.on_start(data)
    }

    /// Executed when the game state exits.
    fn on_stop(&mut self, data: StateData<'_, ()>) {
        self.on_stop(data)
    }

    /// Executed when a different game state is pushed onto the stack.
    fn on_pause(&mut self, data: StateData<'_, ()>) {
        self.on_pause(data)
    }

    /// Executed when the application returns to this game state once again.
    fn on_resume(&mut self, data: StateData<'_, ()>) {
        self.on_resume(data)
    }

    /// Executed on every frame before updating, for use in reacting to events.
    fn handle_event(&mut self, data: StateData<'_, ()>, event: StateEvent) -> EmptyTrans {
        self.handle_event(data, event)
    }

    /// Executed repeatedly at stable, predictable intervals (1/60th of a second
    /// by default).
    fn fixed_update(&mut self, data: StateData<'_, ()>) -> EmptyTrans {
        self.fixed_update(data)
    }

    /// Executed on every frame immediately, as fast as the engine will allow (taking into account the frame rate limit).
    fn update(&mut self, data: StateData<'_, ()>) -> EmptyTrans {
        self.update(data)
    }

    /// Executed repeatedly at stable, predictable intervals (1/60th of a second
    /// by default),
    /// even when this is not the active state,
    /// as long as this state is on the [StateMachine](struct.StateMachine.html)'s state-stack.
    fn shadow_fixed_update(&mut self, data: StateData<'_, ()>) {
        self.shadow_fixed_update(data);
    }

    /// Executed on every frame immediately, as fast as the engine will allow (taking into account the frame rate limit),
    /// even when this is not the active state,
    /// as long as this state is on the [StateMachine](struct.StateMachine.html)'s state-stack.
    fn shadow_update(&mut self, data: StateData<'_, ()>) {
        self.shadow_update(data);
    }
}

/// A simple `State` trait. It contains `GameData` as its `StateData` and no custom `StateEvent`.
pub trait SimpleState<'a, 'b> {
    /// Executed when the game state begins.
    fn on_start(&mut self, _data: StateData<'_, GameData<'_, '_>>) {}

    /// Executed when the game state exits.
    fn on_stop(&mut self, _data: StateData<'_, GameData<'_, '_>>) {}

    /// Executed when a different game state is pushed onto the stack.
    fn on_pause(&mut self, _data: StateData<'_, GameData<'_, '_>>) {}

    /// Executed when the application returns to this game state once again.
    fn on_resume(&mut self, _data: StateData<'_, GameData<'_, '_>>) {}

    /// Executed on every frame before updating, for use in reacting to events.
    fn handle_event(
        &mut self,
        _data: StateData<'_, GameData<'_, '_>>,
        event: StateEvent,
    ) -> SimpleTrans<'a, 'b> {
        if let StateEvent::Window(event) = &event {
            if is_close_requested(&event) {
                Trans::Quit
            } else {
                Trans::None
            }
        } else {
            Trans::None
        }
    }

    /// Executed repeatedly at stable, predictable intervals (1/60th of a second
    /// by default).
    fn fixed_update(&mut self, _data: StateData<'_, GameData<'_, '_>>) -> SimpleTrans<'a, 'b> {
        Trans::None
    }

    /// Executed on every frame immediately, as fast as the engine will allow (taking into account the frame rate limit).
    fn update(&mut self, _data: &mut StateData<'_, GameData<'_, '_>>) -> SimpleTrans<'a, 'b> {
        Trans::None
    }

    /// Executed repeatedly at stable, predictable intervals (1/60th of a second
    /// by default),
    /// even when this is not the active state,
    /// as long as this state is on the [StateMachine](struct.StateMachine.html)'s state-stack.
    fn shadow_fixed_update(&mut self, _data: StateData<'_, GameData<'_, '_>>) {}

    /// Executed on every frame immediately, as fast as the engine will allow (taking into account the frame rate limit),
    /// even when this is not the active state,
    /// as long as this state is on the [StateMachine](struct.StateMachine.html)'s state-stack.
    fn shadow_update(&mut self, _data: StateData<'_, GameData<'_, '_>>) {}
}

impl<'a, 'b, T: SimpleState<'a, 'b>> State<GameData<'a, 'b>, StateEvent> for T {
    //pub trait SimpleState<'a,'b>: State<GameData<'a,'b>,()> {
    /// Executed when the game state begins.
    fn on_start(&mut self, data: StateData<'_, GameData<'_, '_>>) {
        self.on_start(data)
    }

    /// Executed when the game state exits.
    fn on_stop(&mut self, data: StateData<'_, GameData<'_, '_>>) {
        self.on_stop(data)
    }

    /// Executed when a different game state is pushed onto the stack.
    fn on_pause(&mut self, data: StateData<'_, GameData<'_, '_>>) {
        self.on_pause(data)
    }

    /// Executed when the application returns to this game state once again.
    fn on_resume(&mut self, data: StateData<'_, GameData<'_, '_>>) {
        self.on_resume(data)
    }

    /// Executed on every frame before updating, for use in reacting to events.
    fn handle_event(
        &mut self,
        data: StateData<'_, GameData<'_, '_>>,
        event: StateEvent,
    ) -> SimpleTrans<'a, 'b> {
        self.handle_event(data, event)
    }

    /// Executed repeatedly at stable, predictable intervals (1/60th of a second
    /// by default).
    fn fixed_update(&mut self, data: StateData<'_, GameData<'_, '_>>) -> SimpleTrans<'a, 'b> {
        self.fixed_update(data)
    }

    /// Executed on every frame immediately, as fast as the engine will allow (taking into account the frame rate limit).
    fn update(&mut self, mut data: StateData<'_, GameData<'_, '_>>) -> SimpleTrans<'a, 'b> {
        let r = self.update(&mut data);
        data.data.update(&data.world);
        r
    }

    /// Executed repeatedly at stable, predictable intervals (1/60th of a second
    /// by default),
    /// even when this is not the active state,
    /// as long as this state is on the [StateMachine](struct.StateMachine.html)'s state-stack.
    fn shadow_fixed_update(&mut self, data: StateData<'_, GameData<'_, '_>>) {
        self.shadow_fixed_update(data);
    }

    /// Executed on every frame immediately, as fast as the engine will allow (taking into account the frame rate limit),
    /// even when this is not the active state,
        self.shadow_update(data);
    }
}

/// A simple stack-based state machine (pushdown automaton).
#[derive(Derivative)]
#[derivative(Debug)]
pub struct StateMachine<'a, T, E> {
    running: bool,
    #[derivative(Debug = "ignore")]
    state_stack: Vec<Box<dyn State<T, E> + 'a>>,
}

impl<'a, T, E: Send + Sync + 'static> StateMachine<'a, T, E> {
    /// Creates a new state machine with the given initial state.
    pub fn new<S: State<T, E> + 'a>(initial_state: S) -> StateMachine<'a, T, E> {
        StateMachine {
            running: false,
            state_stack: vec![Box::new(initial_state)],
        }
    }

    /// Checks whether the state machine is running.
    pub fn is_running(&self) -> bool {
        self.running
    }

    /// Initializes the state machine.
    pub fn start(&mut self, data: StateData<'_, T>) -> Result<(), StateError> {
        if !self.running {
            let state = self
                .state_stack
                .last_mut()
                .ok_or(StateError::NoStatesPresent)?;
            state.on_start(data);
            self.running = true;
        }
        Ok(())
    }

    /// Passes a single event to the active state to handle.
    pub fn handle_event(&mut self, data: StateData<'_, T>, event: E) {
        let StateData { world, data } = data;
        if self.running {
            let trans = match self.state_stack.last_mut() {
                Some(state) => state.handle_event(StateData { world, data }, event),
                None => Trans::None,
            };

            self.transition(trans, StateData { world, data });
        }
    }

    /// Updates the currently active state at a steady, fixed interval.
    pub fn fixed_update(&mut self, data: StateData<'_, T>) {
        let StateData { world, data } = data;
        if self.running {
            let trans = match self.state_stack.last_mut() {
                Some(state) => state.fixed_update(StateData { world, data }),
                None => Trans::None,
            };
            for state in self.state_stack.iter_mut() {
                state.shadow_fixed_update(StateData { world, data });
            }

            self.transition(trans, StateData { world, data });
        }
    }

    /// Updates the currently active state immediately.
    pub fn update(&mut self, data: StateData<'_, T>) {
        let StateData { world, data } = data;
        if self.running {
            let trans = match self.state_stack.last_mut() {
                Some(state) => state.update(StateData { world, data }),
                None => Trans::None,
            };
            for state in self.state_stack.iter_mut() {
                state.shadow_update(StateData { world, data });
            }

            self.transition(trans, StateData { world, data });
        }
    }

    /// Performs a state transition.
    /// Usually called by update or fixed_update by the user's defined `State`.
    /// This method can also be called when there are one or multiple `Trans` stored in the
    /// global `EventChannel<TransEvent<T, E>>`. Such `Trans` will be passed to this method
    /// sequentially in the order of insertion.
    pub fn transition(&mut self, request: Trans<T, E>, data: StateData<'_, T>) {
        if self.running {
            match request {
                Trans::None => (),
                Trans::Pop => self.pop(data),
                Trans::Push(state) => self.push(state, data),
                Trans::Switch(state) => self.switch(state, data),
                Trans::Quit => self.stop(data),
            }
        }
    }

    /// Removes the current state on the stack and inserts a different one.
    fn switch(&mut self, state: Box<dyn State<T, E>>, data: StateData<'_, T>) {
        if self.running {
            let StateData { world, data } = data;
            if let Some(mut state) = self.state_stack.pop() {
                state.on_stop(StateData { world, data });
            }

            self.state_stack.push(state);

            //State was just pushed, thus pop will always succeed
            let state = self.state_stack.last_mut().unwrap();
            state.on_start(StateData { world, data });
        }
    }

    /// Pauses the active state and pushes a new state onto the state stack.
    fn push(&mut self, state: Box<dyn State<T, E>>, data: StateData<'_, T>) {
        if self.running {
            let StateData { world, data } = data;
            if let Some(state) = self.state_stack.last_mut() {
                state.on_pause(StateData { world, data });
            }

            self.state_stack.push(state);

            //State was just pushed, thus pop will always succeed
            let state = self.state_stack.last_mut().unwrap();
            state.on_start(StateData { world, data });
        }
    }

    /// Stops and removes the active state and un-pauses the next state on the
    /// stack (if any).
    fn pop(&mut self, data: StateData<'_, T>) {
        if self.running {
            let StateData { world, data } = data;
            if let Some(mut state) = self.state_stack.pop() {
                state.on_stop(StateData { world, data });
            }

            if let Some(state) = self.state_stack.last_mut() {
                state.on_resume(StateData { world, data });
            } else {
                self.running = false;
            }
        }
    }

    /// Shuts the state machine down.
    pub(crate) fn stop(&mut self, data: StateData<'_, T>) {
        if self.running {
            let StateData { world, data } = data;
            while let Some(mut state) = self.state_stack.pop() {
                state.on_stop(StateData { world, data });
            }

            self.running = false;
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    struct State1(u8);
    struct State2;

    impl State<(), ()> for State1 {
        fn update(&mut self, _: StateData<'_, ()>) -> Trans<(), ()> {
            if self.0 > 0 {
                self.0 -= 1;
                Trans::None
            } else {
                Trans::Switch(Box::new(State2))
            }
        }
    }

    impl State<(), ()> for State2 {
        fn update(&mut self, _: StateData<'_, ()>) -> Trans<(), ()> {
            Trans::Pop
        }
    }

    #[test]
    fn switch_pop() {
        use crate::ecs::prelude::World;

        let mut world = World::new();

        let mut sm = StateMachine::new(State1(7));
        // Unwrap here is fine because start can only fail when there are no states in the machine.
        sm.start(StateData::new(&mut world, &mut ())).unwrap();

        for _ in 0..8 {
            sm.update(StateData::new(&mut world, &mut ()));
            assert!(sm.is_running());
        }

        sm.update(StateData::new(&mut world, &mut ()));
        assert!(!sm.is_running());
    }
}
    /// as long as this state is on the [StateMachine](struct.StateMachine.html)'s state-stack.
    fn shadow_update(&mut self, data: StateData<'_, GameData<'_, '_>>) {
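A minimal Python sketch of the pushdown-automaton behaviour documented above (Trans::None / Pop / Push / Switch / Quit); the hook names mirror the Rust trait, while the string-tagged transitions and the Machine class are assumptions made purely for illustration:

class BaseState:
    def on_start(self): pass
    def on_stop(self): pass
    def on_pause(self): pass
    def on_resume(self): pass

class Machine:
    def __init__(self, initial):
        self.stack = [initial]
        self.running = True
        initial.on_start()

    def transition(self, kind, state=None):
        if kind == "push":            # Trans::Push: pause active, start new
            self.stack[-1].on_pause()
            self.stack.append(state)
            state.on_start()
        elif kind == "switch":        # Trans::Switch: replace the top state
            self.stack.pop().on_stop()
            self.stack.append(state)
            state.on_start()
        elif kind == "pop":           # Trans::Pop: resume next, or stop machine
            self.stack.pop().on_stop()
            if self.stack:
                self.stack[-1].on_resume()
            else:
                self.running = False
        elif kind == "quit":          # Trans::Quit: unwind the whole stack
            while self.stack:
                self.stack.pop().on_stop()
            self.running = False

m = Machine(BaseState())
m.transition("push", BaseState())   # pause initial, start pushed state
m.transition("pop")                 # stop pushed state, resume initial
m.transition("pop")                 # stack empty -> machine stops
assert not m.running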
schemas.py
from dataclasses import dataclass
from datetime import datetime


@dataclass
class AuthResponse:
    email: str
    image_access: bool
    search_access: bool
    created: datetime
    modified: datetime


@dataclass
class FontResponse:
    filename: str
    id: str
    alias: str
    _self: str


@dataclass
class MemeRequest:
    template_id: str
    style: list[str]
    text: list[str]
    font: str
    extension: str
    redirect: bool


@dataclass
class CustomRequest:
    background: str
    style: str
    text: list[str]
    font: str
    extension: str
    redirect: bool


@dataclass
class MemeTemplateRequest:
    style: list[str]
    text: list[str]
    font: str
    extension: str
    redirect: bool


@dataclass
class AutomaticRequest:
    text: str
    safe: bool
    redirect: bool


@dataclass
class MemeResponse:
    url: str


@dataclass
class ExampleResponse:
    url: str
    template: str


@dataclass
class _Example:
    text: list[str]
    url: str


@dataclass
class TemplateResponse:
    id: str
    name: str
    lines: int
    overlays: int
    styles: list[str]
    blank: str
    example: _Example
    source: str
    _self: str


@dataclass
    error: str
class ErrorResponse:
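Assembled (prefix + middle + suffix), ErrorResponse is a one-field dataclass; a small self-contained sketch with a hypothetical payload shows the intended round-trip:

# Illustrative only: the payload dict below is made up for the example.
from dataclasses import dataclass

@dataclass
class ErrorResponse:
    error: str

payload = {"error": "Template not found"}
resp = ErrorResponse(**payload)
assert resp.error == "Template not found"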
main.go
package main

import (
	"context"
	"fmt"
	"log"
	"math/rand"

	crypto "github.com/libp2p/go-libp2p/gxlibs/github.com/libp2p/go-libp2p-crypto"
	peer "github.com/libp2p/go-libp2p/gxlibs/github.com/libp2p/go-libp2p-peer"
	ps "github.com/libp2p/go-libp2p/gxlibs/github.com/libp2p/go-libp2p-peerstore"
	swarm "github.com/libp2p/go-libp2p/gxlibs/github.com/libp2p/go-libp2p-swarm"
	bhost "github.com/libp2p/go-libp2p/p2p/host/basic"
	ma "github.com/libp2p/go-libp2p/gxlibs/github.com/multiformats/go-multiaddr"
)

// helper method - create a lib-p2p host to listen on a port
func
(port int, done chan bool) *Node {
	// Ignoring most errors for brevity
	// See echo example for more details and better implementation
	priv, pub, _ := crypto.GenerateKeyPair(crypto.Secp256k1, 256)
	pid, _ := peer.IDFromPublicKey(pub)
	listen, _ := ma.NewMultiaddr(fmt.Sprintf("/ip4/127.0.0.1/tcp/%d", port))
	peerStore := ps.NewPeerstore()
	peerStore.AddPrivKey(pid, priv)
	peerStore.AddPubKey(pid, pub)
	n, _ := swarm.NewNetwork(context.Background(), []ma.Multiaddr{listen}, pid, peerStore, nil)
	host := bhost.New(n)
	return NewNode(host, done)
}

func main() {
	// Choose random ports between 10000-10100
	rand.Seed(666)
	port1 := rand.Intn(100) + 10000
	port2 := port1 + 1

	done := make(chan bool, 1)

	// Make 2 hosts
	h1 := makeRandomNode(port1, done)
	h2 := makeRandomNode(port2, done)

	h1.Peerstore().AddAddrs(h2.ID(), h2.Addrs(), ps.PermanentAddrTTL)
	h2.Peerstore().AddAddrs(h1.ID(), h1.Addrs(), ps.PermanentAddrTTL)

	log.Printf("This is a conversation between %s and %s\n", h1.ID(), h2.ID())

	// send messages using the protocols
	h1.Ping(h2.Host)
	h2.Ping(h1.Host)
	h1.Echo(h2.Host)
	h2.Echo(h1.Host)

	// block until all responses have been processed
	for i := 0; i < 4; i++ {
		<-done
	}
}
makeRandomNode
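The tail of main() above is a fan-out/fan-in pattern: four exchanges are fired and the loop blocks on the done channel until four signals arrive. A rough Python analogue (threads and a queue standing in for goroutines and channels; all names here are illustrative):

import queue
import threading

done = queue.Queue()

def exchange(name):
    # ... send a request and wait for its response ...
    done.put(name)  # signal completion, like `done <- true`

for name in ("ping1", "ping2", "echo1", "echo2"):
    threading.Thread(target=exchange, args=(name,)).start()

for _ in range(4):
    done.get()  # like `<-done`: block until each exchange finishes
print("all four exchanges processed")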
docExamples.js
const quickdb = require('../../index.js'); // require('quickdb') for you

/* DOCS */

async function createDoc(){
  const { success, results, error } = await quickdb.doc.create(
    'sweetTemplate.html',
    '<h1>Sweet Template Bro</h1>'
  );
  console.log(success);
  console.log(results);
  console.log(error);
}
  const { success, results, error } = await quickdb.doc.read('sweetTemplate.html');
  console.log(success);
  console.log(results);
  console.log(error);
}

// Note: createDoc() and readDoc() are async and invoked without await,
// so the read is not guaranteed to run after the create has completed.
readDoc();

/* ITEM_SETS */
createDoc();

async function readDoc(){
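Conceptually, every quickdb call above resolves to a { success, results, error } envelope; a Python sketch of that shape (hypothetical, not the quickdb implementation):

def read_doc(name, store):
    if name in store:
        return {"success": True, "results": store[name], "error": None}
    return {"success": False, "results": None, "error": f"{name} not found"}

store = {"sweetTemplate.html": "<h1>Sweet Template Bro</h1>"}
resp = read_doc("sweetTemplate.html", store)
print(resp["success"], resp["results"], resp["error"])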
io.py
"""Functions for input/output""" import os import logging
from interactive_bayesian_optimisation.libs import utils

import numpy as np
from flask import json
import simplejson.errors
import yaml


def get_file_item_max(file_dir, min_val=0):
    # Adding min_val - 1 to the list lets np.max succeed even when the directory is empty
    file_id_list = [int(session_id.split(".")[0])
                    for session_id in os.listdir(file_dir)] + [min_val - 1]
    return int(np.max(file_id_list))


def ensure_savedir_exists(save_dir=None, study_name=None, sub_path=None):
    if sub_path is None:
        os.makedirs(config.get_study_save_dir(save_dir, study_name), exist_ok=True)
    else:
        os.makedirs(os.path.join(config.get_study_save_dir(save_dir, study_name), sub_path),
                    exist_ok=True)


def get_new_user_id(save_dir=None, study_name=None):
    study_dir = config.get_study_save_dir(save_dir, study_name)
    ensure_savedir_exists(save_dir, study_name)
    new_id = get_file_item_max(study_dir) + 1
    return str(new_id)


def get_new_session_id(user_id=None, save_dir=None, study_name=None):
    study_dir = os.path.join(config.get_study_save_dir(save_dir, study_name), str(user_id))
    ensure_savedir_exists(save_dir, study_name, str(user_id))
    new_id = get_file_item_max(study_dir) + 1
    return str(new_id)


def load_data(user_id, session_id, save_dir=None, study_name=None):
    study_dir = os.path.join(config.get_study_save_dir(save_dir, study_name), str(user_id))
    session_filename = str(session_id) + ".save.json"
    session_file_path = os.path.join(study_dir, session_filename)
    with open(session_file_path, "r") as session_file:
        try:
            return json.load(session_file)
        except simplejson.errors.JSONDecodeError as e:
            session_file.seek(0)  # json.load consumed the stream; rewind before re-reading
            logging.error("Possibly malformed JSON string:\n\n"
                          "-----------------------------------\n"
                          "{}\n"
                          "-----------------------------------".format(session_file.read()))
            raise e


def save_data(data, user_id, session_id, save_dir=None, study_name=None,
              incremental=False, override_name=None):
    extension = ".save.json"
    study_dir = os.path.join(config.get_study_save_dir(save_dir, study_name), str(user_id))
    session_filename = str(session_id) + ("" if override_name is None else override_name)
    session_file_path = os.path.join(study_dir, session_filename)

    # Save JSONs in a list of JSON objects
    if not incremental:
        session_file_path += extension
        if os.path.exists(session_file_path):
            save_data_list = load_data(user_id, session_id, save_dir=save_dir, study_name=study_name)
        else:
            save_data_list = list()
        save_data_list.append(data)
        with open(session_file_path, "w") as session_file:
            session_file.write(utils.remove_nan(json.dumps(save_data_list)))
    # Save JSON in a folder of JSON files (one per iteration)
    else:
        ensure_savedir_exists(save_dir, study_name, os.path.join(str(user_id), str(session_id)))
        new_save_name = get_file_item_max(session_file_path) + 1
        session_file_path = os.path.join(session_file_path, str(new_save_name) + extension)
        with open(session_file_path, "w") as session_file:
            session_file.write(utils.remove_nan(json.dumps(data)))


def load_settings(settings_file_name):
    settings_file_path = os.path.join(config.SETTINGS_PATH, settings_file_name + ".yaml")
    if os.path.exists(settings_file_path):
        with open(settings_file_path) as settings_file:
            return yaml.safe_load(settings_file)
    else:
        logging.warning("Settings file {} was not found in {}.".format(settings_file_name,
                                                                       config.SETTINGS_PATH))
        with open(os.path.join(config.SETTINGS_PATH, "default.yaml")) as settings_file:
            return yaml.safe_load(settings_file)
from interactive_bayesian_optimisation import config
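A small worked example (hypothetical directory listing) of the id-allocation convention get_file_item_max implements: the next id is always one past the largest numeric filename prefix, with min_val - 1 seeding the max so empty directories start at min_val:

existing = ["0.save.json", "3.save.json", "12.save.json"]
min_val = 0
ids = [int(name.split(".")[0]) for name in existing] + [min_val - 1]
assert max(ids) == 12               # next session id would be "13"
assert max([min_val - 1]) == -1     # empty directory: next id is "0"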
types.pb.go
// Code generated by protoc-gen-go. DO NOT EDIT. // source: types.proto /* Package trezor is a generated protocol buffer package. It is generated from these files: types.proto messages.proto It has these top-level messages: HDNodeType HDNodePathType CoinType MultisigRedeemScriptType TxInputType TxOutputType TxOutputBinType TransactionType TxRequestDetailsType TxRequestSerializedType IdentityType Initialize GetFeatures Features ClearSession ApplySettings ApplyFlags ChangePin Ping Success Failure ButtonRequest ButtonAck PinMatrixRequest PinMatrixAck Cancel PassphraseRequest PassphraseAck GetEntropy Entropy GetPublicKey PublicKey GetAddress CphereumGetAddress Address CphereumAddress WipeDevice LoadDevice ResetDevice BackupDevice EntropyRequest EntropyAck RecoveryDevice WordRequest WordAck SignMessage VerifyMessage MessageSignature EncryptMessage EncryptedMessage DecryptMessage DecryptedMessage CipherKeyValue CipheredKeyValue EstimateTxSize TxSize SignTx SimpleSignTx TxRequest TxAck CphereumSignTx CphereumTxRequest CphereumTxAck CphereumSignMessage CphereumVerifyMessage CphereumMessageSignature SignIdentity SignedIdentity GetECDHSessionKey ECDHSessionKey SetU2FCounter FirmwareErase FirmwareRequest FirmwareUpload SelfTest DebugLinkDecision DebugLinkGetState DebugLinkState DebugLinkStop DebugLinkLog DebugLinkMemoryRead DebugLinkMemory DebugLinkMemoryWrite DebugLinkFlashErase */ package trezor import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // * // Type of failures returned by Failure message // @used_in Failure type FailureType int32 const ( FailureType_Failure_UnexpectedMessage FailureType = 1 FailureType_Failure_ButtonExpected FailureType = 2 FailureType_Failure_DataError FailureType = 3 FailureType_Failure_ActionCancelled FailureType = 4 FailureType_Failure_PinExpected FailureType = 5 FailureType_Failure_PinCancelled FailureType = 6 FailureType_Failure_PinInvalid FailureType = 7 FailureType_Failure_InvalidSignature FailureType = 8 FailureType_Failure_ProcessError FailureType = 9 FailureType_Failure_NotEnoughFunds FailureType = 10 FailureType_Failure_NotInitialized FailureType = 11 FailureType_Failure_FirmwareError FailureType = 99 ) var FailureType_name = map[int32]string{ 1: "Failure_UnexpectedMessage", 2: "Failure_ButtonExpected", 3: "Failure_DataError", 4: "Failure_ActionCancelled", 5: "Failure_PinExpected", 6: "Failure_PinCancelled", 7: "Failure_PinInvalid", 8: "Failure_InvalidSignature", 9: "Failure_ProcessError", 10: "Failure_NotEnoughFunds", 11: "Failure_NotInitialized", 99: "Failure_FirmwareError", } var FailureType_value = map[string]int32{ "Failure_UnexpectedMessage": 1, "Failure_ButtonExpected": 2, "Failure_DataError": 3, "Failure_ActionCancelled": 4, "Failure_PinExpected": 5, "Failure_PinCancelled": 6, "Failure_PinInvalid": 7, "Failure_InvalidSignature": 8, "Failure_ProcessError": 9, "Failure_NotEnoughFunds": 10, "Failure_NotInitialized": 11, "Failure_FirmwareError": 99, } func (x FailureType) Enum() *FailureType { p := new(FailureType) *p = x return p } func (x FailureType) String() string { return proto.EnumName(FailureType_name, int32(x)) } func (x *FailureType) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(FailureType_value, data, "FailureType") if err != nil { return err } *x = FailureType(value) return nil } func (FailureType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } // * // Type of script which will be used for transaction output // @used_in TxOutputType type OutputScriptType int32 const ( OutputScriptType_PAYTOADDRESS OutputScriptType = 0 OutputScriptType_PAYTOSCRIPTHASH OutputScriptType = 1 OutputScriptType_PAYTOMULTISIG OutputScriptType = 2 OutputScriptType_PAYTOOPRETURN OutputScriptType = 3 OutputScriptType_PAYTOWITNESS OutputScriptType = 4 OutputScriptType_PAYTOP2SHWITNESS OutputScriptType = 5 ) var OutputScriptType_name = map[int32]string{ 0: "PAYTOADDRESS", 1: "PAYTOSCRIPTHASH", 2: "PAYTOMULTISIG", 3: "PAYTOOPRETURN", 4: "PAYTOWITNESS", 5: "PAYTOP2SHWITNESS", } var OutputScriptType_value = map[string]int32{ "PAYTOADDRESS": 0, "PAYTOSCRIPTHASH": 1, "PAYTOMULTISIG": 2, "PAYTOOPRETURN": 3, "PAYTOWITNESS": 4, "PAYTOP2SHWITNESS": 5, } func (x OutputScriptType) Enum() *OutputScriptType { p := new(OutputScriptType) *p = x return p } func (x OutputScriptType) String() string { return proto.EnumName(OutputScriptType_name, int32(x)) } func (x *OutputScriptType) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(OutputScriptType_value, data, "OutputScriptType") if err != nil { return err } *x = OutputScriptType(value) return nil } func (OutputScriptType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } // * // Type of script which will be used for transaction output // @used_in TxInputType type InputScriptType int32 const ( InputScriptType_SPENDADDRESS InputScriptType = 0 InputScriptType_SPENDMULTISIG InputScriptType = 1 
InputScriptType_EXTERNAL InputScriptType = 2 InputScriptType_SPENDWITNESS InputScriptType = 3 InputScriptType_SPENDP2SHWITNESS InputScriptType = 4 ) var InputScriptType_name = map[int32]string{ 0: "SPENDADDRESS", 1: "SPENDMULTISIG", 2: "EXTERNAL", 3: "SPENDWITNESS", 4: "SPENDP2SHWITNESS", } var InputScriptType_value = map[string]int32{ "SPENDADDRESS": 0, "SPENDMULTISIG": 1, "EXTERNAL": 2, "SPENDWITNESS": 3, "SPENDP2SHWITNESS": 4, } func (x InputScriptType) Enum() *InputScriptType { p := new(InputScriptType) *p = x return p } func (x InputScriptType) String() string { return proto.EnumName(InputScriptType_name, int32(x)) } func (x *InputScriptType) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(InputScriptType_value, data, "InputScriptType") if err != nil { return err } *x = InputScriptType(value) return nil } func (InputScriptType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } // * // Type of information required by transaction signing process // @used_in TxRequest type RequestType int32 const ( RequestType_TXINPUT RequestType = 0 RequestType_TXOUTPUT RequestType = 1 RequestType_TXMETA RequestType = 2 RequestType_TXFINISHED RequestType = 3 RequestType_TXEXTRADATA RequestType = 4 ) var RequestType_name = map[int32]string{ 0: "TXINPUT", 1: "TXOUTPUT", 2: "TXMETA", 3: "TXFINISHED", 4: "TXEXTRADATA", } var RequestType_value = map[string]int32{ "TXINPUT": 0, "TXOUTPUT": 1, "TXMETA": 2, "TXFINISHED": 3, "TXEXTRADATA": 4, } func (x RequestType) Enum() *RequestType { p := new(RequestType) *p = x return p } func (x RequestType) String() string { return proto.EnumName(RequestType_name, int32(x)) } func (x *RequestType) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(RequestType_value, data, "RequestType") if err != nil { return err } *x = RequestType(value) return nil } func (RequestType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } // * // Type of button request // @used_in ButtonRequest type ButtonRequestType int32 const ( ButtonRequestType_ButtonRequest_Other ButtonRequestType = 1 ButtonRequestType_ButtonRequest_FeeOverThreshold ButtonRequestType = 2 ButtonRequestType_ButtonRequest_ConfirmOutput ButtonRequestType = 3 ButtonRequestType_ButtonRequest_ResetDevice ButtonRequestType = 4 ButtonRequestType_ButtonRequest_ConfirmWord ButtonRequestType = 5 ButtonRequestType_ButtonRequest_WipeDevice ButtonRequestType = 6 ButtonRequestType_ButtonRequest_ProtectCall ButtonRequestType = 7 ButtonRequestType_ButtonRequest_SignTx ButtonRequestType = 8 ButtonRequestType_ButtonRequest_FirmwareCheck ButtonRequestType = 9 ButtonRequestType_ButtonRequest_Address ButtonRequestType = 10 ButtonRequestType_ButtonRequest_PublicKey ButtonRequestType = 11 ) var ButtonRequestType_name = map[int32]string{ 1: "ButtonRequest_Other", 2: "ButtonRequest_FeeOverThreshold", 3: "ButtonRequest_ConfirmOutput", 4: "ButtonRequest_ResetDevice", 5: "ButtonRequest_ConfirmWord", 6: "ButtonRequest_WipeDevice", 7: "ButtonRequest_ProtectCall", 8: "ButtonRequest_SignTx", 9: "ButtonRequest_FirmwareCheck", 10: "ButtonRequest_Address", 11: "ButtonRequest_PublicKey", } var ButtonRequestType_value = map[string]int32{ "ButtonRequest_Other": 1, "ButtonRequest_FeeOverThreshold": 2, "ButtonRequest_ConfirmOutput": 3, "ButtonRequest_ResetDevice": 4, "ButtonRequest_ConfirmWord": 5, "ButtonRequest_WipeDevice": 6, "ButtonRequest_ProtectCall": 7, "ButtonRequest_SignTx": 8, "ButtonRequest_FirmwareCheck": 9, "ButtonRequest_Address": 10, "ButtonRequest_PublicKey": 
11, } func (x ButtonRequestType) Enum() *ButtonRequestType { p := new(ButtonRequestType) *p = x return p } func (x ButtonRequestType) String() string { return proto.EnumName(ButtonRequestType_name, int32(x)) } func (x *ButtonRequestType) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(ButtonRequestType_value, data, "ButtonRequestType") if err != nil { return err } *x = ButtonRequestType(value) return nil } func (ButtonRequestType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } // * // Type of PIN request // @used_in PinMatrixRequest type PinMatrixRequestType int32 const ( PinMatrixRequestType_PinMatrixRequestType_Current PinMatrixRequestType = 1 PinMatrixRequestType_PinMatrixRequestType_NewFirst PinMatrixRequestType = 2 PinMatrixRequestType_PinMatrixRequestType_NewSecond PinMatrixRequestType = 3 ) var PinMatrixRequestType_name = map[int32]string{ 1: "PinMatrixRequestType_Current", 2: "PinMatrixRequestType_NewFirst", 3: "PinMatrixRequestType_NewSecond", } var PinMatrixRequestType_value = map[string]int32{ "PinMatrixRequestType_Current": 1, "PinMatrixRequestType_NewFirst": 2, "PinMatrixRequestType_NewSecond": 3, } func (x PinMatrixRequestType) Enum() *PinMatrixRequestType { p := new(PinMatrixRequestType) *p = x return p } func (x PinMatrixRequestType) String() string { return proto.EnumName(PinMatrixRequestType_name, int32(x)) } func (x *PinMatrixRequestType) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(PinMatrixRequestType_value, data, "PinMatrixRequestType") if err != nil { return err } *x = PinMatrixRequestType(value) return nil } func (PinMatrixRequestType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } // * // Type of recovery procedure. These should be used as bitmask, e.g., // `RecoveryDeviceType_ScrambledWords | RecoveryDeviceType_Matrix` // listing every method supported by the host computer. // // Note that ScrambledWords must be supported by every implementation
// @used_in RecoveryDevice type RecoveryDeviceType int32 const ( // use powers of two when extending this field RecoveryDeviceType_RecoveryDeviceType_ScrambledWords RecoveryDeviceType = 0 RecoveryDeviceType_RecoveryDeviceType_Matrix RecoveryDeviceType = 1 ) var RecoveryDeviceType_name = map[int32]string{ 0: "RecoveryDeviceType_ScrambledWords", 1: "RecoveryDeviceType_Matrix", } var RecoveryDeviceType_value = map[string]int32{ "RecoveryDeviceType_ScrambledWords": 0, "RecoveryDeviceType_Matrix": 1, } func (x RecoveryDeviceType) Enum() *RecoveryDeviceType { p := new(RecoveryDeviceType) *p = x return p } func (x RecoveryDeviceType) String() string { return proto.EnumName(RecoveryDeviceType_name, int32(x)) } func (x *RecoveryDeviceType) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(RecoveryDeviceType_value, data, "RecoveryDeviceType") if err != nil { return err } *x = RecoveryDeviceType(value) return nil } func (RecoveryDeviceType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } // * // Type of Recovery Word request // @used_in WordRequest type WordRequestType int32 const ( WordRequestType_WordRequestType_Plain WordRequestType = 0 WordRequestType_WordRequestType_Matrix9 WordRequestType = 1 WordRequestType_WordRequestType_Matrix6 WordRequestType = 2 ) var WordRequestType_name = map[int32]string{ 0: "WordRequestType_Plain", 1: "WordRequestType_Matrix9", 2: "WordRequestType_Matrix6", } var WordRequestType_value = map[string]int32{ "WordRequestType_Plain": 0, "WordRequestType_Matrix9": 1, "WordRequestType_Matrix6": 2, } func (x WordRequestType) Enum() *WordRequestType { p := new(WordRequestType) *p = x return p } func (x WordRequestType) String() string { return proto.EnumName(WordRequestType_name, int32(x)) } func (x *WordRequestType) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(WordRequestType_value, data, "WordRequestType") if err != nil { return err } *x = WordRequestType(value) return nil } func (WordRequestType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } // * // Structure representing BIP32 (hierarchical deterministic) node // Used for imports of private key into the device and exporting public key out of device // @used_in PublicKey // @used_in LoadDevice // @used_in DebugLinkState // @used_in Storage type HDNodeType struct { Depth *uint32 `protobuf:"varint,1,req,name=depth" json:"depth,omitempty"` Fingerprint *uint32 `protobuf:"varint,2,req,name=fingerprint" json:"fingerprint,omitempty"` ChildNum *uint32 `protobuf:"varint,3,req,name=child_num,json=childNum" json:"child_num,omitempty"` ChainCode []byte `protobuf:"bytes,4,req,name=chain_code,json=chainCode" json:"chain_code,omitempty"` PrivateKey []byte `protobuf:"bytes,5,opt,name=private_key,json=privateKey" json:"private_key,omitempty"` PublicKey []byte `protobuf:"bytes,6,opt,name=public_key,json=publicKey" json:"public_key,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *HDNodeType) Reset() { *m = HDNodeType{} } func (m *HDNodeType) String() string { return proto.CompactTextString(m) } func (*HDNodeType) ProtoMessage() {} func (*HDNodeType) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } func (m *HDNodeType) GetDepth() uint32 { if m != nil && m.Depth != nil { return *m.Depth } return 0 } func (m *HDNodeType) GetFingerprint() uint32 { if m != nil && m.Fingerprint != nil { return *m.Fingerprint } return 0 } func (m *HDNodeType) GetChildNum() uint32 { if m != nil && m.ChildNum != nil { return *m.ChildNum } return 0 
} func (m *HDNodeType) GetChainCode() []byte { if m != nil { return m.ChainCode } return nil } func (m *HDNodeType) GetPrivateKey() []byte { if m != nil { return m.PrivateKey } return nil } func (m *HDNodeType) GetPublicKey() []byte { if m != nil { return m.PublicKey } return nil } type HDNodePathType struct { Node *HDNodeType `protobuf:"bytes,1,req,name=node" json:"node,omitempty"` AddressN []uint32 `protobuf:"varint,2,rep,name=address_n,json=addressN" json:"address_n,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *HDNodePathType) Reset() { *m = HDNodePathType{} } func (m *HDNodePathType) String() string { return proto.CompactTextString(m) } func (*HDNodePathType) ProtoMessage() {} func (*HDNodePathType) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } func (m *HDNodePathType) GetNode() *HDNodeType { if m != nil { return m.Node } return nil } func (m *HDNodePathType) GetAddressN() []uint32 { if m != nil { return m.AddressN } return nil } // * // Structure representing Coin // @used_in Features type CoinType struct { CoinName *string `protobuf:"bytes,1,opt,name=coin_name,json=coinName" json:"coin_name,omitempty"` CoinShortcut *string `protobuf:"bytes,2,opt,name=coin_shortcut,json=coinShortcut" json:"coin_shortcut,omitempty"` AddressType *uint32 `protobuf:"varint,3,opt,name=address_type,json=addressType,def=0" json:"address_type,omitempty"` MaxfeeKb *uint64 `protobuf:"varint,4,opt,name=maxfee_kb,json=maxfeeKb" json:"maxfee_kb,omitempty"` AddressTypeP2Sh *uint32 `protobuf:"varint,5,opt,name=address_type_p2sh,json=addressTypeP2sh,def=5" json:"address_type_p2sh,omitempty"` SignedMessageHeader *string `protobuf:"bytes,8,opt,name=signed_message_header,json=signedMessageHeader" json:"signed_message_header,omitempty"` XpubMagic *uint32 `protobuf:"varint,9,opt,name=xpub_magic,json=xpubMagic,def=76067358" json:"xpub_magic,omitempty"` XprvMagic *uint32 `protobuf:"varint,10,opt,name=xprv_magic,json=xprvMagic,def=76066276" json:"xprv_magic,omitempty"` Segwit *bool `protobuf:"varint,11,opt,name=segwit" json:"segwit,omitempty"` Forkid *uint32 `protobuf:"varint,12,opt,name=forkid" json:"forkid,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *CoinType) Reset() { *m = CoinType{} } func (m *CoinType) String() string { return proto.CompactTextString(m) } func (*CoinType) ProtoMessage() {} func (*CoinType) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } const Default_CoinType_AddressType uint32 = 0 const Default_CoinType_AddressTypeP2Sh uint32 = 5 const Default_CoinType_XpubMagic uint32 = 76067358 const Default_CoinType_XprvMagic uint32 = 76066276 func (m *CoinType) GetCoinName() string { if m != nil && m.CoinName != nil { return *m.CoinName } return "" } func (m *CoinType) GetCoinShortcut() string { if m != nil && m.CoinShortcut != nil { return *m.CoinShortcut } return "" } func (m *CoinType) GetAddressType() uint32 { if m != nil && m.AddressType != nil { return *m.AddressType } return Default_CoinType_AddressType } func (m *CoinType) GetMaxfeeKb() uint64 { if m != nil && m.MaxfeeKb != nil { return *m.MaxfeeKb } return 0 } func (m *CoinType) GetAddressTypeP2Sh() uint32 { if m != nil && m.AddressTypeP2Sh != nil { return *m.AddressTypeP2Sh } return Default_CoinType_AddressTypeP2Sh } func (m *CoinType) GetSignedMessageHeader() string { if m != nil && m.SignedMessageHeader != nil { return *m.SignedMessageHeader } return "" } func (m *CoinType) GetXpubMagic() uint32 { if m != nil && m.XpubMagic != nil { return *m.XpubMagic } return Default_CoinType_XpubMagic } 
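// Note: the Default_CoinType_XpubMagic / Default_CoinType_XprvMagic values above are the
// Bitcoin BIP32 serialization version prefixes: 76067358 == 0x0488B21E ("xpub") and
// 76066276 == 0x0488ADE4 ("xprv").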
func (m *CoinType) GetXprvMagic() uint32 { if m != nil && m.XprvMagic != nil { return *m.XprvMagic } return Default_CoinType_XprvMagic } func (m *CoinType) GetSegwit() bool { if m != nil && m.Segwit != nil { return *m.Segwit } return false } func (m *CoinType) GetForkid() uint32 { if m != nil && m.Forkid != nil { return *m.Forkid } return 0 } // * // Type of redeem script used in input // @used_in TxInputType type MultisigRedeemScriptType struct { Pubkeys []*HDNodePathType `protobuf:"bytes,1,rep,name=pubkeys" json:"pubkeys,omitempty"` Signatures [][]byte `protobuf:"bytes,2,rep,name=signatures" json:"signatures,omitempty"` M *uint32 `protobuf:"varint,3,opt,name=m" json:"m,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *MultisigRedeemScriptType) Reset() { *m = MultisigRedeemScriptType{} } func (m *MultisigRedeemScriptType) String() string { return proto.CompactTextString(m) } func (*MultisigRedeemScriptType) ProtoMessage() {} func (*MultisigRedeemScriptType) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } func (m *MultisigRedeemScriptType) GetPubkeys() []*HDNodePathType { if m != nil { return m.Pubkeys } return nil } func (m *MultisigRedeemScriptType) GetSignatures() [][]byte { if m != nil { return m.Signatures } return nil } func (m *MultisigRedeemScriptType) GetM() uint32 { if m != nil && m.M != nil { return *m.M } return 0 } // * // Structure representing transaction input // @used_in SimpleSignTx // @used_in TransactionType type TxInputType struct { AddressN []uint32 `protobuf:"varint,1,rep,name=address_n,json=addressN" json:"address_n,omitempty"` PrevHash []byte `protobuf:"bytes,2,req,name=prev_hash,json=prevHash" json:"prev_hash,omitempty"` PrevIndex *uint32 `protobuf:"varint,3,req,name=prev_index,json=prevIndex" json:"prev_index,omitempty"` ScriptSig []byte `protobuf:"bytes,4,opt,name=script_sig,json=scriptSig" json:"script_sig,omitempty"` Sequence *uint32 `protobuf:"varint,5,opt,name=sequence,def=4294967295" json:"sequence,omitempty"` ScriptType *InputScriptType `protobuf:"varint,6,opt,name=script_type,json=scriptType,enum=InputScriptType,def=0" json:"script_type,omitempty"` Multisig *MultisigRedeemScriptType `protobuf:"bytes,7,opt,name=multisig" json:"multisig,omitempty"` Amount *uint64 `protobuf:"varint,8,opt,name=amount" json:"amount,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TxInputType) Reset() { *m = TxInputType{} } func (m *TxInputType) String() string { return proto.CompactTextString(m) } func (*TxInputType) ProtoMessage() {} func (*TxInputType) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } const Default_TxInputType_Sequence uint32 = 4294967295 const Default_TxInputType_ScriptType InputScriptType = InputScriptType_SPENDADDRESS func (m *TxInputType) GetAddressN() []uint32 { if m != nil { return m.AddressN } return nil } func (m *TxInputType) GetPrevHash() []byte { if m != nil { return m.PrevHash } return nil } func (m *TxInputType) GetPrevIndex() uint32 { if m != nil && m.PrevIndex != nil { return *m.PrevIndex } return 0 } func (m *TxInputType) GetScriptSig() []byte { if m != nil { return m.ScriptSig } return nil } func (m *TxInputType) GetSequence() uint32 { if m != nil && m.Sequence != nil { return *m.Sequence } return Default_TxInputType_Sequence } func (m *TxInputType) GetScriptType() InputScriptType { if m != nil && m.ScriptType != nil { return *m.ScriptType } return Default_TxInputType_ScriptType } func (m *TxInputType) GetMultisig() *MultisigRedeemScriptType { if m != nil { return m.Multisig } return nil } 
func (m *TxInputType) GetAmount() uint64 { if m != nil && m.Amount != nil { return *m.Amount } return 0 } // * // Structure representing transaction output // @used_in SimpleSignTx // @used_in TransactionType type TxOutputType struct { Address *string `protobuf:"bytes,1,opt,name=address" json:"address,omitempty"` AddressN []uint32 `protobuf:"varint,2,rep,name=address_n,json=addressN" json:"address_n,omitempty"` Amount *uint64 `protobuf:"varint,3,req,name=amount" json:"amount,omitempty"` ScriptType *OutputScriptType `protobuf:"varint,4,req,name=script_type,json=scriptType,enum=OutputScriptType" json:"script_type,omitempty"` Multisig *MultisigRedeemScriptType `protobuf:"bytes,5,opt,name=multisig" json:"multisig,omitempty"` OpReturnData []byte `protobuf:"bytes,6,opt,name=op_return_data,json=opReturnData" json:"op_return_data,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TxOutputType) Reset() { *m = TxOutputType{} } func (m *TxOutputType) String() string { return proto.CompactTextString(m) } func (*TxOutputType) ProtoMessage() {} func (*TxOutputType) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } func (m *TxOutputType) GetAddress() string { if m != nil && m.Address != nil { return *m.Address } return "" } func (m *TxOutputType) GetAddressN() []uint32 { if m != nil { return m.AddressN } return nil } func (m *TxOutputType) GetAmount() uint64 { if m != nil && m.Amount != nil { return *m.Amount } return 0 } func (m *TxOutputType) GetScriptType() OutputScriptType { if m != nil && m.ScriptType != nil { return *m.ScriptType } return OutputScriptType_PAYTOADDRESS } func (m *TxOutputType) GetMultisig() *MultisigRedeemScriptType { if m != nil { return m.Multisig } return nil } func (m *TxOutputType) GetOpReturnData() []byte { if m != nil { return m.OpReturnData } return nil } // * // Structure representing compiled transaction output // @used_in TransactionType type TxOutputBinType struct { Amount *uint64 `protobuf:"varint,1,req,name=amount" json:"amount,omitempty"` ScriptPubkey []byte `protobuf:"bytes,2,req,name=script_pubkey,json=scriptPubkey" json:"script_pubkey,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TxOutputBinType) Reset() { *m = TxOutputBinType{} } func (m *TxOutputBinType) String() string { return proto.CompactTextString(m) } func (*TxOutputBinType) ProtoMessage() {} func (*TxOutputBinType) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } func (m *TxOutputBinType) GetAmount() uint64 { if m != nil && m.Amount != nil { return *m.Amount } return 0 } func (m *TxOutputBinType) GetScriptPubkey() []byte { if m != nil { return m.ScriptPubkey } return nil } // * // Structure representing transaction // @used_in SimpleSignTx type TransactionType struct { Version *uint32 `protobuf:"varint,1,opt,name=version" json:"version,omitempty"` Inputs []*TxInputType `protobuf:"bytes,2,rep,name=inputs" json:"inputs,omitempty"` BinOutputs []*TxOutputBinType `protobuf:"bytes,3,rep,name=bin_outputs,json=binOutputs" json:"bin_outputs,omitempty"` Outputs []*TxOutputType `protobuf:"bytes,5,rep,name=outputs" json:"outputs,omitempty"` LockTime *uint32 `protobuf:"varint,4,opt,name=lock_time,json=lockTime" json:"lock_time,omitempty"` InputsCnt *uint32 `protobuf:"varint,6,opt,name=inputs_cnt,json=inputsCnt" json:"inputs_cnt,omitempty"` OutputsCnt *uint32 `protobuf:"varint,7,opt,name=outputs_cnt,json=outputsCnt" json:"outputs_cnt,omitempty"` ExtraData []byte `protobuf:"bytes,8,opt,name=extra_data,json=extraData" json:"extra_data,omitempty"` ExtraDataLen *uint32 
`protobuf:"varint,9,opt,name=extra_data_len,json=extraDataLen" json:"extra_data_len,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TransactionType) Reset() { *m = TransactionType{} } func (m *TransactionType) String() string { return proto.CompactTextString(m) } func (*TransactionType) ProtoMessage() {} func (*TransactionType) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } func (m *TransactionType) GetVersion() uint32 { if m != nil && m.Version != nil { return *m.Version } return 0 } func (m *TransactionType) GetInputs() []*TxInputType { if m != nil { return m.Inputs } return nil } func (m *TransactionType) GetBinOutputs() []*TxOutputBinType { if m != nil { return m.BinOutputs } return nil } func (m *TransactionType) GetOutputs() []*TxOutputType { if m != nil { return m.Outputs } return nil } func (m *TransactionType) GetLockTime() uint32 { if m != nil && m.LockTime != nil { return *m.LockTime } return 0 } func (m *TransactionType) GetInputsCnt() uint32 { if m != nil && m.InputsCnt != nil { return *m.InputsCnt } return 0 } func (m *TransactionType) GetOutputsCnt() uint32 { if m != nil && m.OutputsCnt != nil { return *m.OutputsCnt } return 0 } func (m *TransactionType) GetExtraData() []byte { if m != nil { return m.ExtraData } return nil } func (m *TransactionType) GetExtraDataLen() uint32 { if m != nil && m.ExtraDataLen != nil { return *m.ExtraDataLen } return 0 } // * // Structure representing request details // @used_in TxRequest type TxRequestDetailsType struct { RequestIndex *uint32 `protobuf:"varint,1,opt,name=request_index,json=requestIndex" json:"request_index,omitempty"` TxHash []byte `protobuf:"bytes,2,opt,name=tx_hash,json=txHash" json:"tx_hash,omitempty"` ExtraDataLen *uint32 `protobuf:"varint,3,opt,name=extra_data_len,json=extraDataLen" json:"extra_data_len,omitempty"` ExtraDataOffset *uint32 `protobuf:"varint,4,opt,name=extra_data_offset,json=extraDataOffset" json:"extra_data_offset,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TxRequestDetailsType) Reset() { *m = TxRequestDetailsType{} } func (m *TxRequestDetailsType) String() string { return proto.CompactTextString(m) } func (*TxRequestDetailsType) ProtoMessage() {} func (*TxRequestDetailsType) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } func (m *TxRequestDetailsType) GetRequestIndex() uint32 { if m != nil && m.RequestIndex != nil { return *m.RequestIndex } return 0 } func (m *TxRequestDetailsType) GetTxHash() []byte { if m != nil { return m.TxHash } return nil } func (m *TxRequestDetailsType) GetExtraDataLen() uint32 { if m != nil && m.ExtraDataLen != nil { return *m.ExtraDataLen } return 0 } func (m *TxRequestDetailsType) GetExtraDataOffset() uint32 { if m != nil && m.ExtraDataOffset != nil { return *m.ExtraDataOffset } return 0 } // * // Structure representing serialized data // @used_in TxRequest type TxRequestSerializedType struct { SignatureIndex *uint32 `protobuf:"varint,1,opt,name=signature_index,json=signatureIndex" json:"signature_index,omitempty"` Signature []byte `protobuf:"bytes,2,opt,name=signature" json:"signature,omitempty"` SerializedTx []byte `protobuf:"bytes,3,opt,name=serialized_tx,json=serializedTx" json:"serialized_tx,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *TxRequestSerializedType) Reset() { *m = TxRequestSerializedType{} } func (m *TxRequestSerializedType) String() string { return proto.CompactTextString(m) } func (*TxRequestSerializedType) ProtoMessage() {} func (*TxRequestSerializedType) Descriptor() ([]byte, []int) { 
return fileDescriptor0, []int{9} } func (m *TxRequestSerializedType) GetSignatureIndex() uint32 { if m != nil && m.SignatureIndex != nil { return *m.SignatureIndex } return 0 } func (m *TxRequestSerializedType) GetSignature() []byte { if m != nil { return m.Signature } return nil } func (m *TxRequestSerializedType) GetSerializedTx() []byte { if m != nil { return m.SerializedTx } return nil } // * // Structure representing identity data // @used_in IdentityType type IdentityType struct { Proto *string `protobuf:"bytes,1,opt,name=proto" json:"proto,omitempty"` User *string `protobuf:"bytes,2,opt,name=user" json:"user,omitempty"` Host *string `protobuf:"bytes,3,opt,name=host" json:"host,omitempty"` Port *string `protobuf:"bytes,4,opt,name=port" json:"port,omitempty"` Path *string `protobuf:"bytes,5,opt,name=path" json:"path,omitempty"` Index *uint32 `protobuf:"varint,6,opt,name=index,def=0" json:"index,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *IdentityType) Reset() { *m = IdentityType{} } func (m *IdentityType) String() string { return proto.CompactTextString(m) } func (*IdentityType) ProtoMessage() {} func (*IdentityType) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } const Default_IdentityType_Index uint32 = 0 func (m *IdentityType) GetProto() string { if m != nil && m.Proto != nil { return *m.Proto } return "" } func (m *IdentityType) GetUser() string { if m != nil && m.User != nil { return *m.User } return "" } func (m *IdentityType) GetHost() string { if m != nil && m.Host != nil { return *m.Host } return "" } func (m *IdentityType) GetPort() string { if m != nil && m.Port != nil { return *m.Port } return "" } func (m *IdentityType) GetPath() string { if m != nil && m.Path != nil { return *m.Path } return "" } func (m *IdentityType) GetIndex() uint32 { if m != nil && m.Index != nil { return *m.Index } return Default_IdentityType_Index } var E_WireIn = &proto.ExtensionDesc{ ExtendedType: (*google_protobuf.EnumValueOptions)(nil), ExtensionType: (*bool)(nil), Field: 50002, Name: "wire_in", Tag: "varint,50002,opt,name=wire_in,json=wireIn", Filename: "types.proto", } var E_WireOut = &proto.ExtensionDesc{ ExtendedType: (*google_protobuf.EnumValueOptions)(nil), ExtensionType: (*bool)(nil), Field: 50003, Name: "wire_out", Tag: "varint,50003,opt,name=wire_out,json=wireOut", Filename: "types.proto", } var E_WireDebugIn = &proto.ExtensionDesc{ ExtendedType: (*google_protobuf.EnumValueOptions)(nil), ExtensionType: (*bool)(nil), Field: 50004, Name: "wire_debug_in", Tag: "varint,50004,opt,name=wire_debug_in,json=wireDebugIn", Filename: "types.proto", } var E_WireDebugOut = &proto.ExtensionDesc{ ExtendedType: (*google_protobuf.EnumValueOptions)(nil), ExtensionType: (*bool)(nil), Field: 50005, Name: "wire_debug_out", Tag: "varint,50005,opt,name=wire_debug_out,json=wireDebugOut", Filename: "types.proto", } var E_WireTiny = &proto.ExtensionDesc{ ExtendedType: (*google_protobuf.EnumValueOptions)(nil), ExtensionType: (*bool)(nil), Field: 50006, Name: "wire_tiny", Tag: "varint,50006,opt,name=wire_tiny,json=wireTiny", Filename: "types.proto", } var E_WireBootloader = &proto.ExtensionDesc{ ExtendedType: (*google_protobuf.EnumValueOptions)(nil), ExtensionType: (*bool)(nil), Field: 50007, Name: "wire_bootloader", Tag: "varint,50007,opt,name=wire_bootloader,json=wireBootloader", Filename: "types.proto", } func init() { proto.RegisterType((*HDNodeType)(nil), "HDNodeType") proto.RegisterType((*HDNodePathType)(nil), "HDNodePathType") proto.RegisterType((*CoinType)(nil), 
"CoinType") proto.RegisterType((*MultisigRedeemScriptType)(nil), "MultisigRedeemScriptType") proto.RegisterType((*TxInputType)(nil), "TxInputType") proto.RegisterType((*TxOutputType)(nil), "TxOutputType") proto.RegisterType((*TxOutputBinType)(nil), "TxOutputBinType") proto.RegisterType((*TransactionType)(nil), "TransactionType") proto.RegisterType((*TxRequestDetailsType)(nil), "TxRequestDetailsType") proto.RegisterType((*TxRequestSerializedType)(nil), "TxRequestSerializedType") proto.RegisterType((*IdentityType)(nil), "IdentityType") proto.RegisterEnum("FailureType", FailureType_name, FailureType_value) proto.RegisterEnum("OutputScriptType", OutputScriptType_name, OutputScriptType_value) proto.RegisterEnum("InputScriptType", InputScriptType_name, InputScriptType_value) proto.RegisterEnum("RequestType", RequestType_name, RequestType_value) proto.RegisterEnum("ButtonRequestType", ButtonRequestType_name, ButtonRequestType_value) proto.RegisterEnum("PinMatrixRequestType", PinMatrixRequestType_name, PinMatrixRequestType_value) proto.RegisterEnum("RecoveryDeviceType", RecoveryDeviceType_name, RecoveryDeviceType_value) proto.RegisterEnum("WordRequestType", WordRequestType_name, WordRequestType_value) proto.RegisterExtension(E_WireIn) proto.RegisterExtension(E_WireOut) proto.RegisterExtension(E_WireDebugIn) proto.RegisterExtension(E_WireDebugOut) proto.RegisterExtension(E_WireTiny) proto.RegisterExtension(E_WireBootloader) } func init() { proto.RegisterFile("types.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 1899 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x57, 0xdb, 0x72, 0x1a, 0xc9, 0x19, 0xf6, 0x00, 0x92, 0xe0, 0x07, 0xc4, 0xa8, 0x7d, 0xd0, 0x78, 0x6d, 0xaf, 0x31, 0x76, 0x62, 0x45, 0x55, 0x61, 0x77, 0xc9, 0x5a, 0x8e, 0x55, 0xa9, 0x24, 0x3a, 0xa0, 0x15, 0x65, 0x0b, 0x51, 0xc3, 0x28, 0x56, 0x72, 0x33, 0x35, 0xcc, 0xb4, 0xa0, 0x4b, 0x43, 0x37, 0xe9, 0xe9, 0x91, 0xd1, 0xde, 0xe4, 0x2a, 0xc9, 0x55, 0x5e, 0x23, 0x6f, 0x91, 0xaa, 0xbc, 0x41, 0xaa, 0x36, 0xa7, 0xcb, 0xbc, 0x41, 0xae, 0xf2, 0x00, 0xa9, 0x3e, 0x0c, 0x02, 0xc9, 0xde, 0xd2, 0x1d, 0xfd, 0x7d, 0xff, 0xf9, 0xd0, 0x3d, 0x40, 0x59, 0x5c, 0x4e, 0x70, 0xd2, 0x9c, 0x70, 0x26, 0xd8, 0x67, 0xf5, 0x21, 0x63, 0xc3, 0x18, 0x7f, 0xa1, 0x4e, 0x83, 0xf4, 0xec, 0x8b, 0x08, 0x27, 0x21, 0x27, 0x13, 0xc1, 0xb8, 0x96, 0x68, 0xfc, 0xd5, 0x02, 0x38, 0xdc, 0xef, 0xb2, 0x08, 0x7b, 0x97, 0x13, 0x8c, 0xee, 0xc1, 0x52, 0x84, 0x27, 0x62, 0xe4, 0x58, 0xf5, 0xdc, 0x46, 0xd5, 0xd5, 0x07, 0x54, 0x87, 0xf2, 0x19, 0xa1, 0x43, 0xcc, 0x27, 0x9c, 0x50, 0xe1, 0xe4, 0x14, 0x37, 0x0f, 0xa1, 0x47, 0x50, 0x0a, 0x47, 0x24, 0x8e, 0x7c, 0x9a, 0x8e, 0x9d, 0xbc, 0xe2, 0x8b, 0x0a, 0xe8, 0xa6, 0x63, 0xf4, 0x04, 0x20, 0x1c, 0x05, 0x84, 0xfa, 0x21, 0x8b, 0xb0, 0x53, 0xa8, 0xe7, 0x36, 0x2a, 0x6e, 0x49, 0x21, 0x7b, 0x2c, 0xc2, 0xe8, 0x29, 0x94, 0x27, 0x9c, 0x5c, 0x04, 0x02, 0xfb, 0xe7, 0xf8, 0xd2, 0x59, 0xaa, 0x5b, 0x1b, 0x15, 0x17, 0x0c, 0xf4, 0x16, 0x5f, 0x4a, 0xfd, 0x49, 0x3a, 0x88, 0x49, 0xa8, 0xf8, 0x65, 0xc5, 0x97, 0x34, 0xf2, 0x16, 0x5f, 0x36, 0xba, 0xb0, 0xaa, 0x33, 0xe8, 0x05, 0x62, 0xa4, 0xb2, 0x78, 0x0a, 0x05, 0x2a, 0x5d, 0xc9, 0x24, 0xca, 0xad, 0x72, 0xf3, 0x2a, 0x41, 0x57, 0x11, 0x32, 0xdc, 0x20, 0x8a, 0x38, 0x4e, 0x12, 0x9f, 0x3a, 0xb9, 0x7a, 0x5e, 0x86, 0x6b, 0x80, 0x6e, 0xe3, 0x7f, 0x39, 0x28, 0xee, 0x31, 0x42, 0x95, 0x29, 0x99, 0x18, 0x23, 0xd4, 0xa7, 0xc1, 0x58, 0xda, 0xb3, 0x36, 0x4a, 0x6e, 0x51, 0x02, 0xdd, 0x60, 0x8c, 0xd1, 0x73, 0xa8, 0x2a, 0x32, 0x19, 0x31, 0x2e, 0xc2, 0x54, 0x56, 0x46, 0x0a, 
0x54, 0x24, 0xd8, 0x37, 0x18, 0x7a, 0x01, 0x95, 0xcc, 0x97, 0x6c, 0x8d, 0x93, 0xaf, 0x5b, 0x1b, 0xd5, 0x6d, 0xeb, 0x4b, 0xb7, 0x6c, 0xe0, 0xcc, 0xcf, 0x38, 0x98, 0x9e, 0x61, 0xec, 0x9f, 0x0f, 0x9c, 0x42, 0xdd, 0xda, 0x28, 0xb8, 0x45, 0x0d, 0xbc, 0x1d, 0xa0, 0x1f, 0xc3, 0xda, 0xbc, 0x09, 0x7f, 0xd2, 0x4a, 0x46, 0xaa, 0x4e, 0xd5, 0x6d, 0xeb, 0x95, 0x5b, 0x9b, 0xb3, 0xd3, 0x6b, 0x25, 0x23, 0xd4, 0x82, 0xfb, 0x09, 0x19, 0x52, 0x1c, 0xf9, 0x63, 0x9c, 0x24, 0xc1, 0x10, 0xfb, 0x23, 0x1c, 0x44, 0x98, 0x3b, 0x45, 0x15, 0xde, 0x5d, 0x4d, 0x1e, 0x69, 0xee, 0x50, 0x51, 0xe8, 0x25, 0xc0, 0x74, 0x92, 0x0e, 0xfc, 0x71, 0x30, 0x24, 0xa1, 0x53, 0x52, 0xb6, 0x8b, 0xaf, 0xb7, 0xbe, 0xdc, 0x7a, 0xfd, 0x93, 0x57, 0x3f, 0x75, 0x4b, 0x92, 0x3b, 0x92, 0x94, 0x16, 0xe4, 0x17, 0x46, 0x10, 0xae, 0x04, 0xb7, 0x5a, 0xaf, 0xb7, 0xa4, 0x20, 0xbf, 0xd0, 0x82, 0x0f, 0x60, 0x39, 0xc1, 0xc3, 0x0f, 0x44, 0x38, 0xe5, 0xba, 0xb5, 0x51, 0x74, 0xcd, 0x49, 0xe2, 0x67, 0x8c, 0x9f, 0x93, 0xc8, 0xa9, 0x48, 0x65, 0xd7, 0x9c, 0x1a, 0x09, 0x38, 0x47, 0x69, 0x2c, 0x48, 0x42, 0x86, 0x2e, 0x8e, 0x30, 0x1e, 0xf7, 0xd5, 0xa4, 0xaa, 0xea, 0xfc, 0x08, 0x56, 0x26, 0xe9, 0xe0, 0x1c, 0x5f, 0x26, 0x8e, 0x55, 0xcf, 0x6f, 0x94, 0x5b, 0xb5, 0xe6, 0x62, 0xcb, 0xdd, 0x8c, 0x47, 0x9f, 0x03, 0xc8, 0xfc, 0x02, 0x91, 0x72, 0x9c, 0xa8, 0xde, 0x56, 0xdc, 0x39, 0x04, 0x55, 0xc0, 0x1a, 0xeb, 0x1e, 0xb8, 0xd6, 0xb8, 0xf1, 0x97, 0x1c, 0x94, 0xbd, 0x69, 0x87, 0x4e, 0x52, 0x91, 0xb5, 0xe1, 0x6a, 0x30, 0xac, 0xc5, 0xc1, 0x90, 0xe4, 0x84, 0xe3, 0x0b, 0x7f, 0x14, 0x24, 0x23, 0xb5, 0x04, 0x15, 0xb7, 0x28, 0x81, 0xc3, 0x20, 0x19, 0xa9, 0x21, 0x95, 0x24, 0xa1, 0x11, 0x9e, 0x9a, 0x15, 0x50, 0xe2, 0x1d, 0x09, 0x48, 0x5a, 0x6f, 0x9e, 0x9f, 0x90, 0xa1, 0x6a, 0x70, 0xc5, 0x2d, 0x69, 0xa4, 0x4f, 0x86, 0xe8, 0x87, 0x50, 0x4c, 0xf0, 0x6f, 0x53, 0x4c, 0x43, 0x6c, 0x1a, 0x0b, 0x5f, 0xb7, 0xde, 0x7c, 0xfd, 0x66, 0xeb, 0x75, 0xeb, 0xcd, 0x2b, 0x77, 0xc6, 0xa1, 0x5f, 0x40, 0xd9, 0x98, 0x51, 0xb3, 0x24, 0x77, 0x61, 0xb5, 0x65, 0x37, 0x55, 0x02, 0x57, 0xf5, 0xda, 0xae, 0xf4, 0x7b, 0xed, 0xee, 0xfe, 0xce, 0xfe, 0xbe, 0xdb, 0xee, 0xf7, 0x5d, 0xe3, 0x59, 0x25, 0xf8, 0x0a, 0x8a, 0x63, 0x53, 0x65, 0x67, 0xa5, 0x6e, 0x6d, 0x94, 0x5b, 0x0f, 0x9b, 0x9f, 0x2a, 0xbb, 0x3b, 0x13, 0x95, 0x4d, 0x0b, 0xc6, 0x2c, 0xa5, 0x42, 0xcd, 0x50, 0xc1, 0x35, 0xa7, 0xc6, 0x7f, 0x2d, 0xa8, 0x78, 0xd3, 0xe3, 0x54, 0x64, 0x05, 0x74, 0x60, 0xc5, 0xd4, 0xcb, 0x6c, 0x4b, 0x76, 0xfc, 0xde, 0x9d, 0x9b, 0xb3, 0x2f, 0x2b, 0x37, 0xb3, 0x8f, 0x5a, 0x8b, 0xf9, 0xca, 0xbb, 0x63, 0xb5, 0xb5, 0xd6, 0xd4, 0x0e, 0xe7, 0x22, 0xfd, 0x54, 0x8a, 0x4b, 0xb7, 0x4f, 0xf1, 0x05, 0xac, 0xb2, 0x89, 0xcf, 0xb1, 0x48, 0x39, 0xf5, 0xa3, 0x40, 0x04, 0xe6, 0xa6, 0xa9, 0xb0, 0x89, 0xab, 0xc0, 0xfd, 0x40, 0x04, 0x8d, 0x2e, 0xd4, 0xb2, 0x7c, 0x77, 0xcd, 0x15, 0x71, 0x15, 0xbb, 0xb5, 0x10, 0xfb, 0x73, 0xa8, 0x9a, 0xd8, 0xf5, 0x6c, 0x9a, 0x91, 0xa9, 0x68, 0xb0, 0xa7, 0xb0, 0xc6, 0xdf, 0x72, 0x50, 0xf3, 0x78, 0x40, 0x93, 0x20, 0x14, 0x84, 0xd1, 0xac, 0x86, 0x17, 0x98, 0x27, 0x84, 0x51, 0x55, 0xc3, 0xaa, 0x9b, 0x1d, 0xd1, 0x0b, 0x58, 0x26, 0xb2, 0xd5, 0x7a, 0xb0, 0xcb, 0xad, 0x4a, 0x73, 0x6e, 0x78, 0x5d, 0xc3, 0xa1, 0xaf, 0xa0, 0x3c, 0x20, 0xd4, 0x67, 0x2a, 0xca, 0xc4, 0xc9, 0x2b, 0x51, 0xbb, 0x79, 0x2d, 0x6e, 0x17, 0x06, 0x84, 0x6a, 0x24, 0x41, 0x2f, 0x61, 0x25, 0x13, 0x5f, 0x52, 0xe2, 0xd5, 0xe6, 0x7c, 0x5b, 0xdd, 0x8c, 0x95, 0x5d, 0x8c, 0x59, 0x78, 0xee, 0x0b, 0x32, 0xc6, 0x6a, 0x8c, 0xab, 0x6e, 0x51, 0x02, 0x1e, 0x19, 0x63, 0x39, 0xe4, 0x3a, 0x04, 0x3f, 0xa4, 0x42, 0x95, 0xaf, 0xea, 0x96, 0x34, 0xb2, 0x47, 0x85, 0xbc, 0xe8, 0x8d, 0x19, 0xc5, 
0xaf, 0x28, 0x1e, 0x0c, 0x24, 0x05, 0x9e, 0x00, 0xe0, 0xa9, 0xe0, 0x81, 0x2e, 0x7f, 0x51, 0x2f, 0x89, 0x42, 0x64, 0xed, 0x65, 0x87, 0xae, 0x68, 0x3f, 0xc6, 0x54, 0xdf, 0x53, 0x6e, 0x65, 0x26, 0xf2, 0x0e, 0xd3, 0xc6, 0x9f, 0x2d, 0xb8, 0xe7, 0x4d, 0x5d, 0xb9, 0x31, 0x89, 0xd8, 0xc7, 0x22, 0x20, 0xb1, 0xbe, 0x62, 0x9f, 0x43, 0x95, 0x6b, 0xd4, 0x2c, 0xa9, 0x2e, 0x6e, 0xc5, 0x80, 0x7a, 0x4f, 0xd7, 0x61, 0x45, 0x4c, 0xb3, 0x0d, 0x97, 0xfe, 0x97, 0xc5, 0x54, 0xed, 0xf7, 0x4d, 0xe7, 0xf9, 0x9b, 0xce, 0xd1, 0x26, 0xac, 0xcd, 0x49, 0xb1, 0xb3, 0xb3, 0x04, 0x0b, 0x53, 0xa6, 0xda, 0x4c, 0xf0, 0x58, 0xc1, 0x8d, 0xdf, 0x5b, 0xb0, 0x3e, 0x0b, 0xb4, 0x8f, 0x39, 0x09, 0x62, 0xf2, 0x2d, 0x8e, 0x54, 0xac, 0x2f, 0xa1, 0x36, 0xbb, 0xb3, 0x16, 0xa2, 0x5d, 0x9d, 0xc1, 0x3a, 0xde, 0xc7, 0x50, 0x9a, 0x21, 0x26, 0xe2, 0x2b, 0x40, 0x8d, 0xe0, 0xcc, 0xb0, 0x2f, 0xa6, 0x2a, 0x66, 0x39, 0x82, 0x57, 0xde, 0xa6, 0x8d, 0x3f, 0x59, 0x50, 0xe9, 0x44, 0x98, 0x0a, 0x22, 0x2e, 0xb3, 0x8f, 0x00, 0xf5, 0x71, 0x60, 0x36, 0x58, 0x1f, 0x10, 0x82, 0x42, 0x9a, 0x60, 0x6e, 0xde, 0x38, 0xf5, 0x5b, 0x62, 0x23, 0x96, 0x08, 0x65, 0xb6, 0xe4, 0xaa, 0xdf, 0x12, 0x9b, 0x30, 0xae, 0xb3, 0x2e, 0xb9, 0xea, 0xb7, 0xc2, 0x02, 0xa1, 0xdf, 0x2c, 0x89, 0x05, 0x62, 0x84, 0xd6, 0x61, 0x49, 0x27, 0xb6, 0x9c, 0x3d, 0x88, 0xfa, 0xbc, 0xf9, 0x5d, 0x0e, 0xca, 0x07, 0x01, 0x89, 0x53, 0xae, 0xbf, 0x49, 0x9e, 0xc0, 0x43, 0x73, 0xf4, 0x4f, 0x28, 0x9e, 0x4e, 0x70, 0x28, 0x66, 0xaf, 0x97, 0x6d, 0xa1, 0xcf, 0xe0, 0x41, 0x46, 0xef, 0xa6, 0x42, 0x30, 0xda, 0x36, 0x22, 0x76, 0x0e, 0xdd, 0x87, 0xb5, 0x8c, 0x93, 0x85, 0x6f, 0x73, 0xce, 0xb8, 0x9d, 0x47, 0x8f, 0x60, 0x3d, 0x83, 0x77, 0xd4, 0xda, 0xed, 0x05, 0x34, 0xc4, 0x71, 0x8c, 0x23, 0xbb, 0x80, 0xd6, 0xe1, 0x6e, 0x46, 0xf6, 0xc8, 0x95, 0xb1, 0x25, 0xe4, 0xc0, 0xbd, 0x39, 0xe2, 0x4a, 0x65, 0x19, 0x3d, 0x00, 0x34, 0xc7, 0x74, 0xe8, 0x45, 0x10, 0x93, 0xc8, 0x5e, 0x41, 0x8f, 0xc1, 0xc9, 0x70, 0x03, 0xf6, 0xb3, 0xd6, 0xd8, 0xc5, 0x05, 0x7b, 0x9c, 0x85, 0x38, 0x49, 0x74, 0x7c, 0xa5, 0xf9, 0x94, 0xba, 0x4c, 0xb4, 0x29, 0x4b, 0x87, 0xa3, 0x83, 0x94, 0x46, 0x89, 0x0d, 0xd7, 0xb8, 0x0e, 0x25, 0xc2, 0x74, 0xd2, 0x2e, 0xa3, 0x87, 0x70, 0x3f, 0xe3, 0x0e, 0x08, 0x1f, 0x7f, 0x08, 0x38, 0xd6, 0x26, 0xc3, 0xcd, 0x3f, 0x5a, 0x60, 0x5f, 0xbf, 0x35, 0x91, 0x0d, 0x95, 0xde, 0xce, 0xaf, 0xbd, 0x63, 0xf3, 0x50, 0xd8, 0x77, 0xd0, 0x5d, 0xa8, 0x29, 0xa4, 0xbf, 0xe7, 0x76, 0x7a, 0xde, 0xe1, 0x4e, 0xff, 0xd0, 0xb6, 0xd0, 0x1a, 0x54, 0x15, 0x78, 0x74, 0xf2, 0xce, 0xeb, 0xf4, 0x3b, 0xdf, 0xd8, 0xb9, 0x19, 0x74, 0xdc, 0x73, 0xdb, 0xde, 0x89, 0xdb, 0xb5, 0xf3, 0x33, 0x63, 0xef, 0x3b, 0x5e, 0x57, 0x1a, 0x2b, 0xa0, 0x7b, 0x60, 0x2b, 0xa4, 0xd7, 0xea, 0x1f, 0x66, 0xe8, 0xd2, 0x66, 0x0c, 0xb5, 0x6b, 0xcf, 0x95, 0x54, 0x9d, 0x7f, 0xb0, 0xec, 0x3b, 0xd2, 0xbe, 0x42, 0x66, 0x2e, 0x2d, 0x54, 0x81, 0x62, 0xfb, 0xd4, 0x6b, 0xbb, 0xdd, 0x9d, 0x77, 0x76, 0x6e, 0xa6, 0x92, 0xd9, 0xcd, 0x4b, 0x6f, 0x0a, 0x99, 0xf7, 0x56, 0xd8, 0x3c, 0x81, 0xb2, 0xd9, 0x30, 0xe5, 0xa9, 0x0c, 0x2b, 0xde, 0x69, 0xa7, 0xdb, 0x3b, 0xf1, 0xec, 0x3b, 0xd2, 0xa2, 0x77, 0x7a, 0x7c, 0xe2, 0xc9, 0x93, 0x85, 0x00, 0x96, 0xbd, 0xd3, 0xa3, 0xb6, 0xb7, 0x63, 0xe7, 0xd0, 0x2a, 0x80, 0x77, 0x7a, 0xd0, 0xe9, 0x76, 0xfa, 0x87, 0xed, 0x7d, 0x3b, 0x8f, 0x6a, 0x50, 0xf6, 0x4e, 0xdb, 0xa7, 0x9e, 0xbb, 0xb3, 0xbf, 0xe3, 0xed, 0xd8, 0x85, 0xcd, 0xff, 0xe4, 0x60, 0x4d, 0x4f, 0xdb, 0xbc, 0xf5, 0x75, 0xb8, 0xbb, 0x00, 0xfa, 0xc7, 0x62, 0x84, 0xb9, 0x6d, 0xa1, 0x06, 0x7c, 0xbe, 0x48, 0x1c, 0x60, 0x7c, 0x7c, 0x81, 0xb9, 0x37, 0xe2, 0x38, 0x19, 0xb1, 0x58, 0xce, 0xea, 0x53, 0x78, 0xb4, 0x28, 0xb3, 0xc7, 
0xe8, 0x19, 0xe1, 0x63, 0xdd, 0x35, 0x3b, 0x2f, 0xf7, 0x60, 0x51, 0xc0, 0xc5, 0x09, 0x16, 0xfb, 0xf8, 0x82, 0x84, 0xd8, 0x2e, 0xdc, 0xa4, 0x8d, 0xfe, 0x7b, 0xc6, 0xe5, 0xf4, 0x3e, 0x06, 0x67, 0x91, 0x7e, 0x4f, 0x26, 0xd8, 0x28, 0x2f, 0xdf, 0x54, 0xee, 0x71, 0x26, 0x70, 0x28, 0xf6, 0x82, 0x38, 0xb6, 0x57, 0xe4, 0xa8, 0x2e, 0xd2, 0x72, 0x8e, 0xbd, 0xa9, 0x5d, 0xbc, 0x19, 0x75, 0x36, 0x78, 0x7b, 0x23, 0x1c, 0x9e, 0xdb, 0x25, 0x39, 0x93, 0x8b, 0x02, 0x3b, 0xfa, 0xcd, 0xb7, 0x41, 0xae, 0xe1, 0x35, 0xa7, 0xd9, 0x37, 0xbd, 0x5d, 0xde, 0xfc, 0x1d, 0xdc, 0xeb, 0x11, 0x7a, 0x14, 0x08, 0x4e, 0xa6, 0xf3, 0x35, 0xae, 0xc3, 0xe3, 0x8f, 0xe1, 0xfe, 0x5e, 0xca, 0x39, 0xa6, 0xc2, 0xb6, 0xd0, 0x33, 0x78, 0xf2, 0x51, 0x89, 0x2e, 0xfe, 0x70, 0x40, 0x78, 0x22, 0xec, 0x9c, 0xec, 0xc7, 0xa7, 0x44, 0xfa, 0x38, 0x64, 0x34, 0xb2, 0xf3, 0x9b, 0xbf, 0x01, 0xe4, 0xe2, 0x90, 0x5d, 0x60, 0x7e, 0xa9, 0xcb, 0xa4, 0xdc, 0xff, 0x00, 0x9e, 0xdd, 0x44, 0xfd, 0x7e, 0xc8, 0x83, 0xf1, 0x20, 0xc6, 0x91, 0x2c, 0x76, 0x62, 0xdf, 0x91, 0xf5, 0xfc, 0x88, 0x98, 0x76, 0x68, 0x5b, 0x9b, 0x67, 0x50, 0x93, 0x92, 0xf3, 0x79, 0x3d, 0x84, 0xfb, 0xd7, 0x20, 0xbf, 0x17, 0x07, 0x84, 0xda, 0x77, 0x64, 0x9d, 0xae, 0x53, 0xda, 0xd2, 0x1b, 0xdb, 0xfa, 0x34, 0xb9, 0x65, 0xe7, 0xb6, 0x7f, 0x06, 0x2b, 0x1f, 0x88, 0x7a, 0x41, 0xd0, 0xb3, 0xa6, 0xfe, 0x2f, 0xd8, 0xcc, 0xfe, 0x0b, 0x36, 0xdb, 0x34, 0x1d, 0xff, 0x2a, 0x88, 0x53, 0x7c, 0x3c, 0x91, 0x77, 0x60, 0xe2, 0x7c, 0xf7, 0x87, 0xbc, 0xfe, 0x52, 0x97, 0x3a, 0x1d, 0xba, 0xfd, 0x73, 0x28, 0x2a, 0x6d, 0x96, 0x8a, 0xdb, 0xa8, 0xff, 0xdd, 0xa8, 0x2b, 0x97, 0xc7, 0xa9, 0xd8, 0xfe, 0x06, 0xaa, 0x4a, 0x3f, 0xc2, 0x83, 0x74, 0x78, 0xcb, 0x18, 0xfe, 0x61, 0x8c, 0x94, 0xa5, 0xe6, 0xbe, 0x54, 0xec, 0xd0, 0xed, 0x0e, 0xac, 0xce, 0x19, 0xba, 0x65, 0x38, 0xff, 0x34, 0x96, 0x2a, 0x33, 0x4b, 0x32, 0xa6, 0x5f, 0x42, 0x49, 0x99, 0x12, 0x84, 0x5e, 0xde, 0xc6, 0xca, 0xbf, 0x8c, 0x15, 0x55, 0x09, 0x8f, 0xd0, 0xcb, 0xed, 0x77, 0x50, 0x53, 0x16, 0x06, 0x8c, 0x89, 0x98, 0xa9, 0x3f, 0x4f, 0xb7, 0xb0, 0xf3, 0x6f, 0x63, 0x47, 0x25, 0xb2, 0x3b, 0x53, 0xdd, 0xfd, 0x0a, 0x9e, 0x87, 0x6c, 0xdc, 0x4c, 0x02, 0xc1, 0x92, 0x11, 0x89, 0x83, 0x41, 0xd2, 0x14, 0x1c, 0x7f, 0xcb, 0x78, 0x33, 0x26, 0x83, 0x99, 0xbd, 0x5d, 0xf0, 0x14, 0x28, 0xdb, 0xfb, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0x70, 0x88, 0xcd, 0x71, 0xe2, 0x0f, 0x00, 0x00, }
// for backward compatibility; there is no way to not support it. //
genModelsRecurrent_v2.py
#!/usr/bin/env python3 # SPDX-License-Identifier: Apache-2.0 ## # Copyright (C) 2021 Jihoon Lee <[email protected]> # # @file genModelsRecurrent_v2.py # @date 19 October 2021 # @brief Generate recurrent model tcs # @author Jihoon lee <[email protected]> from recorder_v2 import record_v2, inspect_file from zoneout import Zoneout import torch class FCUnroll(torch.nn.Module): def __init__(self, unroll_for=1, num_fc=1): super().__init__() self.fcs = torch.nn.ModuleList([torch.nn.Linear(1, 1) for i in range(num_fc)]) self.unroll_for = unroll_for # self.loss = torch.nn.MSELoss() self.loss = torch.nn.Identity() def forward(self, inputs, labels): output = inputs[0] for i in range(self.unroll_for): for fc in self.fcs: output = fc(output) loss = self.loss(output) # loss = self.loss(output, labels[0]) return output, loss class RNNCellStacked(torch.nn.Module): def
(self, unroll_for=1, num_rnn=1, input_size=1, hidden_size=1): super().__init__() self.rnns = torch.nn.ModuleList( [ torch.nn.RNNCell(input_size, hidden_size) for _ in range(num_rnn) ] ) self.unroll_for = unroll_for self.loss = torch.nn.MSELoss() def forward(self, inputs, labels): hs = [torch.zeros_like(inputs[0]) for _ in self.rnns] out = inputs[0] ret = [] for _ in range(self.unroll_for): for i, rnn in enumerate(self.rnns): hs[i] = rnn(out, hs[i]) out = hs[i] ret.append(out) ret = torch.stack(ret, dim=1) loss = self.loss(ret, labels[0]) return ret, loss class LSTMStacked(torch.nn.Module): def __init__(self, num_lstm=1, bidirectional=False): super().__init__() self.input_size = self.hidden_size = 2 self.num_lstm = num_lstm self.bidirectional=bidirectional self.lstms = torch.nn.ModuleList( [ torch.nn.LSTM(self.input_size if self.bidirectional == False or i == 0 else 2 * self.input_size, self.hidden_size, batch_first=True, bidirectional=bidirectional) # Intended comment # torch.nn.LSTM(self.input_size if self.bidirectional == False or i == 0 else 2 * self.input_size, self.hidden_size, num_layers=num_lstm, batch_first=True, bidirectional=bidirectional) for i in range(num_lstm) ] ) self.loss = torch.nn.MSELoss() def forward(self, inputs, labels): out = inputs[0] states = inputs[1:] # hs = [states[2 * i] for i in range(self.num_lstm)] hs = [torch.zeros((2, 3, 2)) if self.bidirectional else torch.zeros((1, 3, 2)) for _ in range(self.num_lstm)] # cs = [states[2 * i + 1] for i in range(self.num_lstm)] cs = [torch.zeros((2, 3, 2)) if self.bidirectional else torch.zeros((1, 3, 2)) for _ in range(self.num_lstm)] for i, (lstm, h, c) in enumerate(zip(self.lstms, hs, cs)): out, (hs[i], cs[i]) = lstm(out, (h, c)) loss = self.loss(out, labels[0]) return out, loss class LSTMCellStacked(torch.nn.Module): def __init__(self, unroll_for=2, num_lstmcell=1): super().__init__() self.input_size = self.hidden_size = 2 self.lstmcells = torch.nn.ModuleList( [ torch.nn.LSTMCell(self.input_size, self.hidden_size) for _ in range(num_lstmcell) ] ) self.unroll_for = unroll_for self.num_lstmcell = num_lstmcell self.loss = torch.nn.MSELoss() def forward(self, inputs, labels): out = inputs[0] states = inputs[1:] hs = [states[2 * i] for i in range(self.num_lstmcell)] cs = [states[2 * i + 1] for i in range(self.num_lstmcell)] ret = [] for _ in range(self.unroll_for): for i, (lstm, h, c) in enumerate(zip(self.lstmcells, hs, cs)): hs[i], cs[i] = lstm(out, (h, c)) out = hs[i] ret.append(out) ret = torch.stack(ret, dim=1) loss = self.loss(ret, labels[0]) return ret, loss class ZoneoutLSTMStacked(torch.nn.Module): def __init__(self, batch_size=3, unroll_for=2, num_lstm=1, hidden_state_zoneout_rate=1, cell_state_zoneout_rate=1): super().__init__() self.input_size = self.hidden_size = 2 self.cell_state_zoneout_rate = cell_state_zoneout_rate self.zoneout_lstms = torch.nn.ModuleList( [ Zoneout(batch_size, self.input_size, self.hidden_size, unroll_for, hidden_state_zoneout_rate, cell_state_zoneout_rate) for _ in range(num_lstm) ] ) self.unroll_for = unroll_for self.num_lstm = num_lstm self.loss = torch.nn.MSELoss() def forward(self, inputs, labels): out = inputs[0] states = inputs[1:] hs = [states[2 * i] for i in range(self.num_lstm)] cs = [states[2 * i + 1] for i in range(self.num_lstm)] ret = [] for num_unroll in range(self.unroll_for): for i, (zoneout_lstm, h, c) in enumerate(zip(self.zoneout_lstms, hs, cs)): hs[i], cs[i] = zoneout_lstm(out, (h, c, num_unroll)) out = hs[i] ret.append(out) ret = torch.stack(ret, dim=1) loss = 
self.loss(ret, labels[0]) return ret, loss class GRUCellStacked(torch.nn.Module): def __init__(self, unroll_for=2, num_grucell=1): super().__init__() self.input_size = self.hidden_size = 2 self.grus = torch.nn.ModuleList( [ torch.nn.GRUCell(self.input_size, self.hidden_size, bias=True) for _ in range(num_grucell) ] ) self.unroll_for = unroll_for self.loss = torch.nn.MSELoss() def forward(self, inputs, labels): out = inputs[0] hs = inputs[1:] ret = [] for _ in range(self.unroll_for): for i, (gru, h) in enumerate(zip(self.grus, hs)): hs[i] = gru(out, h) out = hs[i] ret.append(out) ret = torch.stack(ret, dim=1) loss = self.loss(ret, labels[0]) return ret, loss if __name__ == "__main__": record_v2( FCUnroll(unroll_for=5), iteration=2, input_dims=[(1,)], label_dims=[(1,)], name="fc_unroll_single", ) record_v2( FCUnroll(unroll_for=2, num_fc=2), iteration=2, input_dims=[(1,)], label_dims=[(1,)], name="fc_unroll_stacked", ) record_v2( FCUnroll(unroll_for=2, num_fc=2), iteration=2, input_dims=[(1,)], label_dims=[(1,)], name="fc_unroll_stacked_clipped", clip=True ) record_v2( RNNCellStacked(unroll_for=2, num_rnn=1, input_size=2, hidden_size=2), iteration=2, input_dims=[(3, 2)], label_dims=[(3, 2, 2)], name="rnncell_single", ) record_v2( RNNCellStacked(unroll_for=2, num_rnn=2, input_size=2, hidden_size=2), iteration=2, input_dims=[(3, 2)], label_dims=[(3, 2, 2)], name="rnncell_stacked", ) unroll_for, num_lstm, batch_size, unit, feature_size, iteration, bidirectional = [2, 1, 3, 2, 2, 2, False] record_v2( LSTMStacked(num_lstm=num_lstm, bidirectional=bidirectional), iteration=iteration, input_dims=[(batch_size, unroll_for, feature_size)], # input_dims=[(batch_size, unroll_for, feature_size)] + [(1, batch_size, unit) for _ in range(2 * num_lstm)], label_dims=[(batch_size, unroll_for, unit)], name="lstm_single", ) unroll_for, num_lstm, batch_size, unit, feature_size, iteration, bidirectional = [2, 2, 3, 2, 2, 2, False] record_v2( LSTMStacked(num_lstm=num_lstm, bidirectional=bidirectional), iteration=iteration, input_dims=[(batch_size, unroll_for, feature_size)], # input_dims=[(batch_size, unroll_for, feature_size)] + [(1, batch_size, unit) for _ in range(2 * num_lstm)], label_dims=[(batch_size, unroll_for, unit)], name="lstm_stacked", ) unroll_for, num_lstm, batch_size, unit, feature_size, iteration, bidirectional = [2, 1, 3, 2, 2, 2, True] record_v2( LSTMStacked(num_lstm=num_lstm, bidirectional=bidirectional), iteration=iteration, input_dims=[(batch_size, unroll_for, feature_size)], # input_dims=[(batch_size, unroll_for, feature_size)] + [(2, batch_size, unit) for _ in range(2 * num_lstm)], label_dims=[(batch_size, unroll_for, 2 * unit)], name="bidirectional_lstm_single", ) unroll_for, num_lstm, batch_size, unit, feature_size, iteration, bidirectional = [2, 2, 3, 2, 2, 2, True] record_v2( LSTMStacked(num_lstm=num_lstm, bidirectional=bidirectional), iteration=iteration, input_dims=[(batch_size, unroll_for, feature_size)], # input_dims=[(batch_size, unroll_for, feature_size)] + [(2, batch_size, unit) for _ in range(2 * num_lstm)], label_dims=[(batch_size, unroll_for, 2 * unit)], name="bidirectional_lstm_stacked", ) unroll_for, num_lstmcell, state_num, batch_size, unit, feature_size, iteration = [2, 1, 2, 3, 2, 2, 2] record_v2( LSTMCellStacked(unroll_for=unroll_for, num_lstmcell=num_lstmcell), iteration=iteration, input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstmcell)], label_dims=[(batch_size, unroll_for, unit)], name="lstmcell_single", ) unroll_for, 
num_lstmcell, state_num, batch_size, unit, feature_size, iteration = [2, 2, 2, 3, 2, 2, 2] record_v2( LSTMCellStacked(unroll_for=unroll_for, num_lstmcell=num_lstmcell), iteration=iteration, input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstmcell)], label_dims=[(batch_size, unroll_for, unit)], name="lstmcell_stacked", ) unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 1, 2, 1, 2, 2, 2, 0.0, 0.0] record_v2( ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate), iteration=iteration, input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)], label_dims=[(batch_size, unroll_for, unit)], name="zoneout_lstm_single_000_000", ) unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 2, 2, 1, 2, 2, 2, 0.0, 0.0] record_v2( ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate), iteration=iteration, input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)], label_dims=[(batch_size, unroll_for, unit)], name="zoneout_lstm_stacked_000_000", ) unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 1, 2, 1, 2, 2, 2, 0.5, 0.0] record_v2( ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate), iteration=iteration, input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)], label_dims=[(batch_size, unroll_for, unit)], name="zoneout_lstm_single_050_000", ) unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 2, 2, 1, 2, 2, 2, 0.5, 0.0] record_v2( ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate), iteration=iteration, input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)], label_dims=[(batch_size, unroll_for, unit)], name="zoneout_lstm_stacked_050_000", ) unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 1, 2, 1, 2, 2, 2, 1.0, 0.0] record_v2( ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate), iteration=iteration, input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)], label_dims=[(batch_size, unroll_for, unit)], name="zoneout_lstm_single_100_000", ) unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 2, 2, 1, 2, 2, 2, 1.0, 0.0] record_v2( ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate), 
iteration=iteration, input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)], label_dims=[(batch_size, unroll_for, unit)], name="zoneout_lstm_stacked_100_000", ) unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 1, 2, 1, 2, 2, 2, 0.0, 0.5] record_v2( ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate), iteration=iteration, input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)], label_dims=[(batch_size, unroll_for, unit)], name="zoneout_lstm_single_000_050", ) unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 2, 2, 1, 2, 2, 2, 0.0, 0.5] record_v2( ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate), iteration=iteration, input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)], label_dims=[(batch_size, unroll_for, unit)], name="zoneout_lstm_stacked_000_050", ) unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 1, 2, 1, 2, 2, 2, 0.5, 0.5] record_v2( ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate), iteration=iteration, input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)], label_dims=[(batch_size, unroll_for, unit)], name="zoneout_lstm_single_050_050", ) unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 2, 2, 1, 2, 2, 2, 0.5, 0.5] record_v2( ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate), iteration=iteration, input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)], label_dims=[(batch_size, unroll_for, unit)], name="zoneout_lstm_stacked_050_050", ) unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 1, 2, 1, 2, 2, 2, 1.0, 0.5] record_v2( ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate), iteration=iteration, input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)], label_dims=[(batch_size, unroll_for, unit)], name="zoneout_lstm_single_100_050", ) unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 2, 2, 1, 2, 2, 2, 1.0, 0.5] record_v2( ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate), iteration=iteration, input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)], label_dims=[(batch_size, unroll_for, unit)], 
name="zoneout_lstm_stacked_100_050", ) unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 1, 2, 1, 2, 2, 2, 0.0, 1.0] record_v2( ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate), iteration=iteration, input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)], label_dims=[(batch_size, unroll_for, unit)], name="zoneout_lstm_single_000_100", ) unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 2, 2, 1, 2, 2, 2, 0.0, 1.0] record_v2( ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate), iteration=iteration, input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)], label_dims=[(batch_size, unroll_for, unit)], name="zoneout_lstm_stacked_000_100", ) unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 1, 2, 1, 2, 2, 2, 0.5, 1.0] record_v2( ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate), iteration=iteration, input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)], label_dims=[(batch_size, unroll_for, unit)], name="zoneout_lstm_single_050_100", ) unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 2, 2, 1, 2, 2, 2, 0.5, 1.0] record_v2( ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate), iteration=iteration, input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)], label_dims=[(batch_size, unroll_for, unit)], name="zoneout_lstm_stacked_050_100", ) unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 1, 2, 1, 2, 2, 2, 1.0, 1.0] record_v2( ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate), iteration=iteration, input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)], label_dims=[(batch_size, unroll_for, unit)], name="zoneout_lstm_single_100_100", ) unroll_for, num_lstm, state_num, batch_size, unit, feature_size, iteration, hidden_state_zoneout_rate, cell_state_zoneout_rate = [2, 2, 2, 1, 2, 2, 2, 1.0, 1.0] record_v2( ZoneoutLSTMStacked(batch_size=batch_size, unroll_for=unroll_for, num_lstm=num_lstm, hidden_state_zoneout_rate=hidden_state_zoneout_rate, cell_state_zoneout_rate=cell_state_zoneout_rate), iteration=iteration, input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(state_num * num_lstm)], label_dims=[(batch_size, unroll_for, unit)], name="zoneout_lstm_stacked_100_100", ) unroll_for, num_grucell, batch_size, unit, feature_size, iteration, = [2, 1, 3, 2, 2, 2] record_v2( 
GRUCellStacked(unroll_for=unroll_for, num_grucell=num_grucell), iteration=iteration, input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(num_grucell)], label_dims=[(batch_size, unroll_for, unit)], name="grucell_single", ) unroll_for, num_grucell, batch_size, unit, feature_size, iteration, = [2, 2, 3, 2, 2, 2] record_v2( GRUCellStacked(unroll_for=unroll_for, num_grucell=num_grucell), iteration=iteration, input_dims=[(batch_size, feature_size)] + [(batch_size, unit) for _ in range(num_grucell)], label_dims=[(batch_size, unroll_for, unit)], name="grucell_stacked", ) # inspect_file("lstm_single.nnmodelgolden")
__init__
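# A minimal standalone sketch of the equivalence the *_single golden cases above
# rely on: manually unrolling a torch.nn.LSTMCell step by step reproduces a
# one-layer torch.nn.LSTM once the parameters are shared. This is illustrative
# only (it is not part of the recorded generator); the sizes mirror the
# batch/feature/unit values used above.
import torch

torch.manual_seed(0)
batch, steps, feat, unit = 3, 2, 2, 2

cell = torch.nn.LSTMCell(feat, unit)
lstm = torch.nn.LSTM(feat, unit, batch_first=True)
with torch.no_grad():
    # Copy the cell parameters into the fused LSTM so outputs are comparable.
    lstm.weight_ih_l0.copy_(cell.weight_ih)
    lstm.weight_hh_l0.copy_(cell.weight_hh)
    lstm.bias_ih_l0.copy_(cell.bias_ih)
    lstm.bias_hh_l0.copy_(cell.bias_hh)

x = torch.randn(batch, steps, feat)
h = torch.zeros(batch, unit)
c = torch.zeros(batch, unit)
outs = []
for t in range(steps):  # manual unroll, as in LSTMCellStacked.forward
    h, c = cell(x[:, t], (h, c))
    outs.append(h)

assert torch.allclose(torch.stack(outs, dim=1), lstm(x)[0], atol=1e-5)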
sql_isolation_testcase.py
""" Copyright (c) 2004-Present VMware, Inc. or its affiliates. This program and the accompanying materials are made available under the terms of the under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import pg import os import subprocess import re import multiprocessing import tempfile import time import sys import socket from optparse import OptionParser import traceback def is_digit(n): try: int(n) return True except ValueError: return False def null_notice_receiver(notice): ''' Tests ignore notice messages when analyzing results, so silently drop notices from the pg.connection ''' return class SQLIsolationExecutor(object): def __init__(self, dbname=''): self.processes = {} # The re.S flag makes the "." in the regex match newlines. # When matched against a command in process_command(), all # lines in the command are matched and sent as SQL query. self.command_pattern = re.compile(r"^(-?\d+|[*])([&\\<\\>USq]*?)\:(.*)", re.S) if dbname: self.dbname = dbname else: self.dbname = os.environ.get('PGDATABASE') class SQLConnection(object): def __init__(self, out_file, name, mode, dbname): self.name = name self.mode = mode self.out_file = out_file self.dbname = dbname parent_conn, child_conn = multiprocessing.Pipe(True) self.p = multiprocessing.Process(target=self.session_process, args=(child_conn,)) self.pipe = parent_conn self.has_open = False self.p.start() # Close "our" copy of the child's handle, so that if the child dies, # recv() on the pipe will fail. child_conn.close(); self.out_file = out_file def session_process(self, pipe): sp = SQLIsolationExecutor.SQLSessionProcess(self.name, self.mode, pipe, self.dbname) sp.do() def query(self, command): print(file=self.out_file) self.out_file.flush() if len(command.strip()) == 0: return if self.has_open: raise Exception("Cannot query command while waiting for results") self.pipe.send((command, False)) r = self.pipe.recv() if r is None: raise Exception("Execution failed") print(r.rstrip(), file=self.out_file) def fork(self, command, blocking): print(" <waiting ...>", file=self.out_file) self.pipe.send((command, True)) if blocking: time.sleep(0.5) if self.pipe.poll(0): p = self.pipe.recv() raise Exception("Forked command is not blocking; got output: %s" % p.strip()) self.has_open = True def join(self): r = None print(" <... completed>", file=self.out_file) if self.has_open: r = self.pipe.recv() if r is None: raise Exception("Execution failed") print(r.rstrip(), file=self.out_file) self.has_open = False def stop(self): self.pipe.send(("", False)) self.p.join() if self.has_open: raise Exception("Should not finish test case while waiting for results") def quit(self): print(" ... 
<quitting>", file=self.out_file) self.stop() def terminate(self): self.pipe.close() self.p.terminate() class SQLSessionProcess(object): def __init__(self, name, mode, pipe, dbname): """ Constructor """ self.name = name self.mode = mode self.pipe = pipe self.dbname = dbname if self.mode == "utility": (hostname, port) = self.get_hostname_port(name, 'p') self.con = self.connectdb(given_dbname=self.dbname, given_host=hostname, given_port=port, given_opt="-c gp_role=utility") elif self.mode == "standby": # Connect to standby even when it's role is recorded # as mirror. This is useful for scenarios where a # test needs to promote a standby without using # gpactivatestandby. (hostname, port) = self.get_hostname_port(name, 'm') self.con = self.connectdb(given_dbname=self.dbname, given_host=hostname, given_port=port) else: self.con = self.connectdb(self.dbname) def connectdb(self, given_dbname, given_host = None, given_port = None, given_opt = None): con = None retry = 1000 while retry: try: if (given_port is None): con = pg.connect(host= given_host, opt= given_opt, dbname= given_dbname) else: con = pg.connect(host= given_host, port= given_port, opt= given_opt, dbname= given_dbname) break except Exception as e: if (("the database system is starting up" in str(e) or "the database system is in recovery mode" in str(e)) and retry > 1): retry -= 1 time.sleep(0.1) else: raise con.set_notice_receiver(null_notice_receiver) return con def get_hostname_port(self, contentid, role): """ Gets the port number/hostname combination of the contentid and role """ query = ("SELECT hostname, port FROM gp_segment_configuration WHERE" " content = %s AND role = '%s'") % (contentid, role) con = self.connectdb(self.dbname, given_opt="-c gp_role=utility") r = con.query(query).getresult() con.close() if len(r) == 0: raise Exception("Invalid content %s" % contentid) if r[0][0] == socket.gethostname(): return (None, int(r[0][1])) return (r[0][0], int(r[0][1])) def printout_result(self, r): """ Print out a pygresql result set (a Query object, after the query has been executed), in a format that imitates the default formatting of psql. This isn't a perfect imitation: we left-justify all the fields and headers, whereas psql centers the header, and right-justifies numeric fields. But this is close enough, to make gpdiff.pl recognize the result sets as such. (We used to just call str(r), and let PyGreSQL do the formatting. But even though PyGreSQL's default formatting is close to psql's, it's not close enough.) """ widths = [] # Figure out the widths of each column. fields = r.listfields() for f in fields: widths.append(len(str(f))) rset = r.getresult() for row in rset: colno = 0 for col in row: if col is None: col = "" widths[colno] = max(widths[colno], len(str(col))) colno = colno + 1 # Start printing. Header first. 
result = "" colno = 0 for f in fields: if colno > 0: result += "|" result += " " + f.ljust(widths[colno]) + " " colno = colno + 1 result += "\n" # Then the bar ("----+----") colno = 0 for f in fields: if colno > 0: result += "+" result += "".ljust(widths[colno] + 2, "-") colno = colno + 1 result += "\n" # Then the result set itself for row in rset: colno = 0 for col in row: if colno > 0: result += "|" if isinstance(col, float): col = format(col, "g") elif isinstance(col, bool): if col: col = 't' else: col = 'f' elif col is None: col = "" result += " " + str(col).ljust(widths[colno]) + " " colno = colno + 1 result += "\n" # Finally, the row count if len(rset) == 1: result += "(1 row)\n" else: result += "(" + str(len(rset)) + " rows)\n" return result def execute_command(self, command): """ Executes a given command """ try: r = self.con.query(command) if r is not None: if type(r) == str: # INSERT, UPDATE, etc that returns row count but not result set echo_content = command[:-1].partition(" ")[0].upper() return "%s %s" % (echo_content, r) else: # SELECT or similar, print the result set without the command (type pg.Query) return self.printout_result(r) else: # CREATE or other DDL without a result set or count echo_content = command[:-1].partition(" ")[0].upper() return echo_content except Exception as e: return str(e) def do(self): """ Process loop. Ends when the command None is received """ (c, wait) = self.pipe.recv() while c: if wait: time.sleep(0.1) r = self.execute_command(c) self.pipe.send(r) r = None (c, wait) = self.pipe.recv() def get_process(self, out_file, name, mode="", dbname=""): """ Gets or creates the process by the given name """ if len(name) > 0 and not is_digit(name): raise Exception("Name should be a number") if len(name) > 0 and mode != "utility" and int(name) >= 1024: raise Exception("Session name should be smaller than 1024 unless it is utility mode number") if not (name, mode) in self.processes: if not dbname: dbname = self.dbname self.processes[(name, mode)] = SQLIsolationExecutor.SQLConnection(out_file, name, mode, dbname) return self.processes[(name, mode)] def quit_process(self, out_file, name, mode="", dbname=""): """ Quits a process with the given name """ if len(name) > 0 and not is_digit(name): raise Exception("Name should be a number") if len(name) > 0 and mode != "utility" and int(name) >= 1024: raise Exception("Session name should be smaller than 1024 unless it is utility mode number") if not (name, mode) in self.processes: raise Exception("Sessions not started cannot be quit") self.processes[(name, mode)].quit() del self.processes[(name, mode)] def
(self, dbname): """ Retrieves all primary content IDs (including the master). Intended for use by *U queries. """ if not dbname: dbname = self.dbname con = pg.connect(dbname=dbname) result = con.query("SELECT content FROM gp_segment_configuration WHERE role = 'p'").getresult() if len(result) == 0: raise Exception("Invalid gp_segment_configuration contents") return [int(content[0]) for content in result] def process_command(self, command, output_file): """ Processes the given command. The command at this point still includes the isolation behavior flags, e.g. which session to use. """ process_name = "" sql = command flag = "" con_mode = "" dbname = "" m = self.command_pattern.match(command) if m: process_name = m.groups()[0] flag = m.groups()[1] if flag and flag[0] == "U": con_mode = "utility" elif flag and flag[0] == "S": if len(flag) > 1: flag = flag[1:] con_mode = "standby" sql = m.groups()[2] sql = sql.lstrip() # If db_name is specifed , it should be of the following syntax: # 1:@db_name <db_name>: <sql> if sql.startswith('@db_name'): sql_parts = sql.split(':', 2) if not len(sql_parts) == 2: raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>") if not sql_parts[0].startswith('@db_name'): raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>") if not len(sql_parts[0].split()) == 2: raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>") dbname = sql_parts[0].split()[1].strip() if not dbname: raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>") sql = sql_parts[1] if not flag: if sql.startswith('!'): sql = sql[1:] # Check for execution mode. E.g. # !\retcode path/to/executable --option1 --option2 ... # # At the moment, we only recognize the \retcode mode, which # ignores all program output in the diff (it's still printed) # and adds the return code. 
mode = None if sql.startswith('\\'): mode, sql = sql.split(None, 1) if mode != '\\retcode': raise Exception('Invalid execution mode: {}'.format(mode)) cmd_output = subprocess.Popen(sql.strip(), stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True) stdout, _ = cmd_output.communicate() print(file=output_file) if mode == '\\retcode': print('-- start_ignore', file=output_file) print(stdout.decode(), file=output_file) if mode == '\\retcode': print('-- end_ignore', file=output_file) print('(exited with code {})'.format(cmd_output.returncode), file=output_file) else: self.get_process(output_file, process_name, con_mode, dbname=dbname).query(sql.strip()) elif flag == "&": self.get_process(output_file, process_name, con_mode, dbname=dbname).fork(sql.strip(), True) elif flag == ">": self.get_process(output_file, process_name, con_mode, dbname=dbname).fork(sql.strip(), False) elif flag == "<": if len(sql) > 0: raise Exception("No query should be given on join") self.get_process(output_file, process_name, con_mode, dbname=dbname).join() elif flag == "q": if len(sql) > 0: raise Exception("No query should be given on quit") self.quit_process(output_file, process_name, con_mode, dbname=dbname) elif flag == "U": if process_name == '*': process_names = [str(content) for content in self.get_all_primary_contentids(dbname)] else: process_names = [process_name] for name in process_names: self.get_process(output_file, name, con_mode, dbname=dbname).query(sql.strip()) elif flag == "U&": self.get_process(output_file, process_name, con_mode, dbname=dbname).fork(sql.strip(), True) elif flag == "U<": if len(sql) > 0: raise Exception("No query should be given on join") self.get_process(output_file, process_name, con_mode, dbname=dbname).join() elif flag == "Uq": if len(sql) > 0: raise Exception("No query should be given on quit") self.quit_process(output_file, process_name, con_mode, dbname=dbname) elif flag == "S": self.get_process(output_file, process_name, con_mode, dbname=dbname).query(sql.strip()) else: raise Exception("Invalid isolation flag") def process_isolation_file(self, sql_file, output_file): """ Processes the given sql file and writes the output to output file """ try: command = "" newline = False for line in sql_file: # this logic replicates the python2 behavior of a trailing comma at the end of print # i.e. ''' print >>output_file, line.strip(), ''' print((" " if command and not newline else "") + line.strip(), end="", file=output_file) newline = False if line[0] == "!": command_part = line # shell commands can use -- for multichar options like --include elif re.match(r";.*--", line) or re.match(r"^--", line): command_part = line.partition("--")[0] # remove comment from line else: command_part = line if command_part == "" or command_part == "\n": print(file=output_file) newline = True elif re.match(r".*;\s*$", command_part) or re.match(r"^\d+[q\\<]:$", line) or re.match(r"^-?\d+[SU][q\\<]:$", line): command += command_part try: self.process_command(command, output_file) except Exception as e: print("FAILED: ", e, file=output_file) command = "" else: command += command_part for process in list(self.processes.values()): process.stop() except: for process in list(self.processes.values()): process.terminate() raise finally: for process in list(self.processes.values()): process.terminate() class SQLIsolationTestCase: """ The isolation test case allows a fine grained control of interleaved executing transactions. This is mainly used to test isolation behavior. [<#>[flag]:] <sql> | ! 
 <shell scripts or command>

    #: either an integer indicating a unique session, or a content-id if
       followed by U (for utility-mode connections). In 'U' mode, the
       content-id can alternatively be an asterisk '*' to perform a
       utility-mode query on the master and all primaries.
    flag:
        &: expect blocking behavior
        >: running in background without blocking
        <: join an existing session
        q: quit the given session

        U: connect in utility mode to primary contentid from
           gp_segment_configuration
        U&: expect blocking behavior in utility mode (does not currently
            support an asterisk target)
        U<: join an existing utility mode session (does not currently
            support an asterisk target)

    An example is:

    Execute BEGIN in transaction 1
    Execute BEGIN in transaction 2
    Execute INSERT in transaction 2
    Execute SELECT in transaction 1
    Execute COMMIT in transaction 2
    Execute SELECT in transaction 1

    The isolation tests are specified identically to sql-scripts in normal
    SQLTestCases. However, it is possible to prefix a SQL line with a
    transaction identifier followed by a colon (":"). The above example
    would be defined by

    1: BEGIN;
    2: BEGIN;
    2: INSERT INTO a VALUES (1);
    1: SELECT * FROM a;
    2: COMMIT;
    1: SELECT * FROM a;

    Blocking behavior can be tested by forking and joining.

    1: BEGIN;
    2: BEGIN;
    1: DELETE FROM foo WHERE a = 4;
    2&: DELETE FROM foo WHERE a = 4;
    1: COMMIT;
    2<:
    2: COMMIT;

    2& forks the command. It is executed in the background. If the command
    is NOT blocking at this point, it is considered an error. 2< joins the
    background command and outputs the result of the command execution.

    Session ids should be smaller than 1024.

    2U: Executes a utility command connected to port 40000.

    One difference to SQLTestCase is the output of INSERT. SQLTestCase would
    output "INSERT 0 1" if one tuple is inserted. SQLIsolationTestCase would
    output "INSERT 1". As the SQLIsolationTestCase needs more fine-grained
    control over the execution order than is possible with PSQL, it uses the
    pygresql python library instead.

    Connecting to a specific database:
    1. If you specify a db_name metadata in the sql file, connect to that
       database in all open sessions.
    2. If you want a specific session to be connected to a specific
       database, specify the sql as follows:
       1:@db_name testdb: <sql>
       2:@db_name test2db: <sql>
       1: <sql>
       2: <sql>
       etc
       Here session 1 will be connected to testdb and session 2 will be
       connected to test2db. You can specify @db_name only at the beginning
       of the session. For example, the following would error out:
       1:@db_name testdb: <sql>
       2:@db_name test2db: <sql>
       1: @db_name testdb: <sql>
       2: <sql>
       etc

    Quitting sessions:
    By default, all opened sessions will be stopped only at the end of the
    sql file execution. If you want to explicitly quit a session in the
    middle of the test execution, you can specify a flag 'q' with the
    session identifier. For example:
    1:@db_name testdb: <sql>
    2:@db_name test2db: <sql>
    1: <sql>
    2: <sql>
    1q:
    2: <sql>
    3: <sql>
    2q:
    3: <sql>
    2: @db_name test: <sql>

    1q: ---> Will quit the session established with testdb.
    2q: ---> Will quit the session established with test2db.
    The subsequent 2: @db_name test: <sql> will open a new session with the
    database test and execute the sql against that session.

    Catalog Modification:
    Some tests are easier to write if it's possible to modify a system
    catalog across the *entire* cluster. To perform a utility-mode query on
    all segments and the master, you can use *U commands:

    *U: SET allow_system_table_mods = true;
    *U: UPDATE pg_catalog.<table> SET <column> = <value> WHERE <cond>;

    Since the number of query results returned by a *U command depends on
    the developer's cluster configuration, it can be useful to wrap them in
    a start_/end_ignore block. (Unfortunately, this also hides legitimate
    failures; a better long-term solution is needed.)

    Block/join flags are not currently supported with *U.

    Line continuation:
    If a line does not end with a semicolon ';' followed by zero or more
    spaces, the line will be combined with the next line and sent together
    as a single statement.
    e.g.: Send to the server separately:
        1: SELECT * FROM t1;    -> send "SELECT * FROM t1;"
        SELECT * FROM t2;       -> send "SELECT * FROM t2;"
    e.g.: Send to the server once:
        1: SELECT * FROM t1;
        SELECT * FROM t2;       -> send "SELECT * FROM t1; SELECT * FROM t2;"

    ATTENTION: Sending multiple SQL statements at once:
    Multiple SQL statements can be sent at once, but there are some known
    issues. Generally only the last query result will be printed. But due to
    the difficulty of dealing with semicolons inside quotes, we always echo
    the first SQL command instead of the last one if query() returns None.
    This creates some strange output like:
        CREATE TABLE t1 (a INT);
        INSERT INTO t1 SELECT generate_series(1,1000);
        CREATE 1000
    (Should be INSERT 1000, but here the CREATE is taken due to the
    limitation)
    """

    def run_sql_file(self, sql_file, out_file = None, out_dir = None, optimizer = None):
        """
        Given a sql file and an ans file, this adds the specified gucs
        (self.gucs) to the sql file, runs the sql against the test case
        database (self.db_name) and verifies the output with the ans file.
        If an 'init_file' exists in the same location as the sql_file, this
        will be used while doing gpdiff.
        """
        # Add gucs to the test sql and form the actual sql file to be run
        if not out_dir:
            out_dir = self.get_out_dir()

        if not os.path.exists(out_dir):
            TINCSystem.make_dirs(out_dir, ignore_exists_error = True)

        if optimizer is None:
            gucs_sql_file = os.path.join(out_dir, os.path.basename(sql_file))
        else:
            # sql file will be <basename>_opt.sql or <basename>_planner.sql based on optimizer
            gucs_sql_file = os.path.join(out_dir, os.path.basename(sql_file).replace('.sql', '_%s.sql' %self._optimizer_suffix(optimizer)))
        self._add_gucs_to_sql_file(sql_file, gucs_sql_file, optimizer)
        self.test_artifacts.append(gucs_sql_file)

        if not out_file:
            if optimizer is None:
                out_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace('.sql', '.out'))
            else:
                # out file will be *_opt.out or *_planner.out based on optimizer
                out_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace('.sql', '_%s.out' %self._optimizer_suffix(optimizer)))

        self.test_artifacts.append(out_file)

        executor = SQLIsolationExecutor(dbname=self.db_name)
        with open(out_file, "w") as f:
            executor.process_isolation_file(open(sql_file), f)
            f.flush()

        if out_file[-2:] == '.t':
            out_file = out_file[:-2]

        return out_file

if __name__ == "__main__":
    parser = OptionParser()
    parser.add_option("--dbname", dest="dbname",
                      help="connect to database DBNAME", metavar="DBNAME")
    (options, args) = parser.parse_args()
    executor = SQLIsolationExecutor(dbname=options.dbname)
    executor.process_isolation_file(sys.stdin, sys.stdout)
get_all_primary_contentids
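# For reference, the session-line grammar dispatched by process_command above
# is driven entirely by command_pattern. The sketch below reuses the same
# regex to show how a line splits into (session, flags, sql); the sample
# lines are illustrative only.
import re

command_pattern = re.compile(r"^(-?\d+|[*])([&\\<\\>USq]*?)\:(.*)", re.S)
for line in ["1: BEGIN;", "2&: DELETE FROM foo WHERE a = 4;", "2<:", "1q:",
             "-1U: SELECT 1;", "*U: SET allow_system_table_mods = true;"]:
    session, flags, sql = command_pattern.match(line).groups()
    print(repr(session), repr(flags), repr(sql.strip()))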
preprocessor.py
import numpy as np
# from scipy.misc import imread, imresize
from scipy import misc


def preprocess_input(x, v2=True):
    # Scale pixel values to [0, 1]; the v2 variant recenters them to [-1, 1].
    x = x.astype('float32')
    x = x / 255.0
    if v2:
        x = x - 0.5
        x = x * 2.0
    return x


def _imread(image_name):
    # Note: scipy.misc.imread was deprecated in SciPy 1.0 and removed in 1.2,
    # so this helper only works on older SciPy versions.
    return misc.imread(image_name)


def _imresize(image_array, size):
    # Note: scipy.misc.imresize was likewise removed (SciPy 1.3).
    return misc.imresize(image_array, size)


def to_categorical(integer_classes, num_classes=2):
    # One-hot encode integer class labels via a single fancy-indexing assignment.
    integer_classes = np.asarray(integer_classes, dtype='int')
    num_samples = integer_classes.shape[0]
    categorical = np.zeros((num_samples, num_classes))
    categorical[np.arange(num_samples), integer_classes] = 1
    return categorical
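# Minimal usage sketch for to_categorical above: row i receives a 1 in column
# integer_classes[i] through fancy indexing, with no explicit loop.
import numpy as np

labels = np.array([0, 1, 1, 0])
one_hot = np.zeros((labels.shape[0], 2))
one_hot[np.arange(labels.shape[0]), labels] = 1
print(one_hot)
# [[1. 0.]
#  [0. 1.]
#  [0. 1.]
#  [1. 0.]]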
blanc_test.py
import unittest from sacrerouge.common.testing.metric_test_cases import DocumentBasedMetricTestCase from sacrerouge.common.testing.util import sacrerouge_command_exists from sacrerouge.metrics import Blanc from sacrerouge.metrics.blanc import BLANC_INSTALLED @unittest.skipIf(not BLANC_INSTALLED, '"blanc" not installed') class TestBlanc(DocumentBasedMetricTestCase):
def test_command_exists(self): assert sacrerouge_command_exists(['blanc']) def test_setup_command_exists(self): assert sacrerouge_command_exists(['setup-metric', 'blanc']) def test_blanc_help_regression(self): # Tests the examples from the official github repo, but they do not match the scores there and I don't know why metric = Blanc(blanc_type='blanc_help', random_seed=4) documents_list = [ ['Jack drove his minivan to the bazaar to purchase milk and honey for his large family.'], ['As Jill started taking a walk in the park, she certainly noticed that the trees were extra green this year.'] ] summaries_list = [ ['Jack bought milk and honey.', 'Jack drove to the bazaar in a minivan'], ['Jill saw green trees in the park.', 'The trees were green.'] ] expected_scores_list = [ [0.2222222222222222, 0.2222222222222222], [0.14285714285714285, 0.14285714285714285] ] actual_scores_list = metric.score_multi_all(summaries_list, documents_list) assert len(expected_scores_list) == len(actual_scores_list) for expected_scores, actual_scores in zip(expected_scores_list, actual_scores_list): assert len(expected_scores) == len(actual_scores) for expected, actual in zip(expected_scores, actual_scores): self.assertAlmostEqual(expected, actual['blanc_help'], places=4) def test_blanc_tune_repo(self): # Tests the examples from the official github repo. The scores don't match because they change based # on the random seed and the repo does not set the random seed metric = Blanc(blanc_type='blanc_tune', gap=6) documents_list = [ ['Jack drove his minivan to the bazaar to purchase milk and honey for his large family.'], ['As Jill started taking a walk in the park, she certainly noticed that the trees were extra green this year.'] ] summaries_list = [ ['Jack bought milk and honey.', 'Jack drove to the bazaar in a minivan'], ['Jill saw green trees in the park.', 'The trees were green.'] ] # The github repo shows the last 2 numbers are negative in some spots, but positive in others. I think # they're supposed to be positive expected_scores_list = [ [0.1111111111111111, 0.2222222222222222], [0.14285714285714285, 0.14285714285714285] ] actual_scores_list = metric.score_multi_all(summaries_list, documents_list) assert len(expected_scores_list) == len(actual_scores_list) for expected_scores, actual_scores in zip(expected_scores_list, actual_scores_list): assert len(expected_scores) == len(actual_scores) for expected, actual in zip(expected_scores, actual_scores): self.assertAlmostEqual(expected, actual['blanc_tune'], places=4)
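# A hedged usage sketch of the API exercised by the tests above. The calling
# convention is inferred from the test alone, not from sacrerouge docs:
# score_multi_all takes parallel lists of summary lists and document lists,
# and returns one dict per summary keyed by the configured blanc_type.
from sacrerouge.metrics import Blanc

documents_list = [['Jack drove his minivan to the bazaar to purchase milk and honey.']]
summaries_list = [['Jack bought milk and honey.']]
metric = Blanc(blanc_type='blanc_help', random_seed=4)
scores = metric.score_multi_all(summaries_list, documents_list)
print(scores[0][0]['blanc_help'])  # a single float score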
reconciler.go
/* Copyright 2020 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Code generated by injection-gen. DO NOT EDIT. package mydeployment import ( context "context" json "encoding/json" fmt "fmt" zap "go.uber.org/zap" corev1 "k8s.io/api/core/v1" equality "k8s.io/apimachinery/pkg/api/equality" errors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" types "k8s.io/apimachinery/pkg/types" sets "k8s.io/apimachinery/pkg/util/sets" record "k8s.io/client-go/tools/record" controller "knative.dev/pkg/controller" kmp "knative.dev/pkg/kmp" logging "knative.dev/pkg/logging" reconciler "knative.dev/pkg/reconciler" v1 "knative.dev/sample-controller/pkg/apis/demo/v1" versioned "knative.dev/sample-controller/pkg/client/clientset/versioned" demov1 "knative.dev/sample-controller/pkg/client/listers/demo/v1" ) // Interface defines the strongly typed interfaces to be implemented by a // controller reconciling v1.MyDeployment. type Interface interface { // ReconcileKind implements custom logic to reconcile v1.MyDeployment. Any changes // to the objects .Status or .Finalizers will be propagated to the stored // object. It is recommended that implementors do not call any update calls // for the Kind inside of ReconcileKind, it is the responsibility of the calling // controller to propagate those properties. The resource passed to ReconcileKind // will always have an empty deletion timestamp. ReconcileKind(ctx context.Context, o *v1.MyDeployment) reconciler.Event } // Finalizer defines the strongly typed interfaces to be implemented by a // controller finalizing v1.MyDeployment. type Finalizer interface { // FinalizeKind implements custom logic to finalize v1.MyDeployment. Any changes // to the objects .Status or .Finalizers will be ignored. Returning a nil or // Normal type reconciler.Event will allow the finalizer to be deleted on // the resource. The resource passed to FinalizeKind will always have a set // deletion timestamp. FinalizeKind(ctx context.Context, o *v1.MyDeployment) reconciler.Event } // ReadOnlyInterface defines the strongly typed interfaces to be implemented by a // controller reconciling v1.MyDeployment if they want to process resources for which // they are not the leader. type ReadOnlyInterface interface { // ObserveKind implements logic to observe v1.MyDeployment. // This method should not write to the API. ObserveKind(ctx context.Context, o *v1.MyDeployment) reconciler.Event } type doReconcile func(ctx context.Context, o *v1.MyDeployment) reconciler.Event // reconcilerImpl implements controller.Reconciler for v1.MyDeployment resources. type reconcilerImpl struct { // LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware. reconciler.LeaderAwareFuncs // Client is used to write back status updates. Client versioned.Interface // Listers index properties about resources. Lister demov1.MyDeploymentLister // Recorder is an event recorder for recording Event resources to the // Kubernetes API. 
Recorder record.EventRecorder // configStore allows for decorating a context with config maps. // +optional configStore reconciler.ConfigStore // reconciler is the implementation of the business logic of the resource. reconciler Interface // finalizerName is the name of the finalizer to reconcile. finalizerName string // skipStatusUpdates configures whether or not this reconciler automatically updates // the status of the reconciled resource. skipStatusUpdates bool } // Check that our Reconciler implements controller.Reconciler. var _ controller.Reconciler = (*reconcilerImpl)(nil) // Check that our generated Reconciler is always LeaderAware. var _ reconciler.LeaderAware = (*reconcilerImpl)(nil) func NewReconciler(ctx context.Context, logger *zap.SugaredLogger, client versioned.Interface, lister demov1.MyDeploymentLister, recorder record.EventRecorder, r Interface, options ...controller.Options) controller.Reconciler
// Reconcile implements controller.Reconciler
func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error {
	logger := logging.FromContext(ctx)

	// Initialize the reconciler state. This will convert the namespace/name
	// string into a distinct namespace and name, determine if this instance of
	// the reconciler is the leader, and any additional interfaces implemented
	// by the reconciler. Returns an error if the resource key is invalid.
	s, err := newState(key, r)
	if err != nil {
		logger.Error("Invalid resource key: ", key)
		return nil
	}

	// If we are not the leader, and we don't implement either of the ReadOnly
	// observer interfaces, then take a fast-path out.
	if s.isNotLeaderNorObserver() {
		return controller.NewSkipKey(key)
	}

	// If configStore is set, attach the frozen configuration to the context.
	if r.configStore != nil {
		ctx = r.configStore.ToContext(ctx)
	}

	// Add the recorder to context.
	ctx = controller.WithEventRecorder(ctx, r.Recorder)

	// Get the resource with this namespace/name.
	getter := r.Lister.MyDeployments(s.namespace)
	original, err := getter.Get(s.name)
	if errors.IsNotFound(err) {
		// The resource may no longer exist, in which case we stop processing and call
		// the ObserveDeletion handler if appropriate.
		logger.Debugf("Resource %q no longer exists", key)
		if del, ok := r.reconciler.(reconciler.OnDeletionInterface); ok {
			return del.ObserveDeletion(ctx, types.NamespacedName{
				Namespace: s.namespace,
				Name:      s.name,
			})
		}
		return nil
	} else if err != nil {
		return err
	}

	// Don't modify the informers copy.
	resource := original.DeepCopy()

	var reconcileEvent reconciler.Event

	name, do := s.reconcileMethodFor(resource)
	// Append the target method to the logger.
	logger = logger.With(zap.String("targetMethod", name))
	switch name {
	case reconciler.DoReconcileKind:
		// Set and update the finalizer on resource if r.reconciler
		// implements Finalizer.
		if resource, err = r.setFinalizerIfFinalizer(ctx, resource); err != nil {
			return fmt.Errorf("failed to set finalizers: %w", err)
		}

		if !r.skipStatusUpdates {
			reconciler.PreProcessReconcile(ctx, resource)
		}

		// Reconcile this copy of the resource and then write back any status
		// updates regardless of whether the reconciliation errored out.
		reconcileEvent = do(ctx, resource)

		if !r.skipStatusUpdates {
			reconciler.PostProcessReconcile(ctx, resource, original)
		}

	case reconciler.DoFinalizeKind:
		// For finalizing reconcilers, if this resource is being marked for deletion
		// and reconciled cleanly (nil or normal event), remove the finalizer.
		reconcileEvent = do(ctx, resource)

		if resource, err = r.clearFinalizer(ctx, resource, reconcileEvent); err != nil {
			return fmt.Errorf("failed to clear finalizers: %w", err)
		}

	case reconciler.DoObserveKind:
		// Observe any changes to this resource, since we are not the leader.
		reconcileEvent = do(ctx, resource)
	}

	// Synchronize the status.
	switch {
	case r.skipStatusUpdates:
		// This reconciler implementation is configured to skip resource updates.
		// This may mean this reconciler does not observe spec, but reconciles external changes.
	case equality.Semantic.DeepEqual(original.Status, resource.Status):
		// If we didn't change anything then don't call updateStatus.
		// This is important because the copy we loaded from the injectionInformer's
		// cache may be stale and we don't want to overwrite a prior update
		// to status with this stale state.
	case !s.isLeader:
		// High-availability reconcilers may have many replicas watching the resource, but only
		// the elected leader is expected to write modifications.
logger.Warn("Saw status changes when we aren't the leader!") default: if err = r.updateStatus(ctx, original, resource); err != nil { logger.Warnw("Failed to update resource status", zap.Error(err)) r.Recorder.Eventf(resource, corev1.EventTypeWarning, "UpdateFailed", "Failed to update status for %q: %v", resource.Name, err) return err } } // Report the reconciler event, if any. if reconcileEvent != nil { var event *reconciler.ReconcilerEvent if reconciler.EventAs(reconcileEvent, &event) { logger.Infow("Returned an event", zap.Any("event", reconcileEvent)) r.Recorder.Event(resource, event.EventType, event.Reason, event.Error()) // the event was wrapped inside an error, consider the reconciliation as failed if _, isEvent := reconcileEvent.(*reconciler.ReconcilerEvent); !isEvent { return reconcileEvent } return nil } if controller.IsSkipKey(reconcileEvent) { // This is a wrapped error, don't emit an event. } else if ok, _ := controller.IsRequeueKey(reconcileEvent); ok { // This is a wrapped error, don't emit an event. } else { logger.Errorw("Returned an error", zap.Error(reconcileEvent)) r.Recorder.Event(resource, corev1.EventTypeWarning, "InternalError", reconcileEvent.Error()) } return reconcileEvent } return nil } func (r *reconcilerImpl) updateStatus(ctx context.Context, existing *v1.MyDeployment, desired *v1.MyDeployment) error { existing = existing.DeepCopy() return reconciler.RetryUpdateConflicts(func(attempts int) (err error) { // The first iteration tries to use the injectionInformer's state, subsequent attempts fetch the latest state via API. if attempts > 0 { getter := r.Client.SamplesV1().MyDeployments(desired.Namespace) existing, err = getter.Get(ctx, desired.Name, metav1.GetOptions{}) if err != nil { return err } } // If there's nothing to update, just return. if equality.Semantic.DeepEqual(existing.Status, desired.Status) { return nil } if diff, err := kmp.SafeDiff(existing.Status, desired.Status); err == nil && diff != "" { logging.FromContext(ctx).Debug("Updating status with: ", diff) } existing.Status = desired.Status updater := r.Client.SamplesV1().MyDeployments(existing.Namespace) _, err = updater.UpdateStatus(ctx, existing, metav1.UpdateOptions{}) return err }) } // updateFinalizersFiltered will update the Finalizers of the resource. // TODO: this method could be generic and sync all finalizers. For now it only // updates defaultFinalizerName or its override. func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource *v1.MyDeployment) (*v1.MyDeployment, error) { getter := r.Lister.MyDeployments(resource.Namespace) actual, err := getter.Get(resource.Name) if err != nil { return resource, err } // Don't modify the informers copy. existing := actual.DeepCopy() var finalizers []string // If there's nothing to update, just return. existingFinalizers := sets.NewString(existing.Finalizers...) desiredFinalizers := sets.NewString(resource.Finalizers...) if desiredFinalizers.Has(r.finalizerName) { if existingFinalizers.Has(r.finalizerName) { // Nothing to do. return resource, nil } // Add the finalizer. finalizers = append(existing.Finalizers, r.finalizerName) } else { if !existingFinalizers.Has(r.finalizerName) { // Nothing to do. return resource, nil } // Remove the finalizer. 
existingFinalizers.Delete(r.finalizerName) finalizers = existingFinalizers.List() } mergePatch := map[string]interface{}{ "metadata": map[string]interface{}{ "finalizers": finalizers, "resourceVersion": existing.ResourceVersion, }, } patch, err := json.Marshal(mergePatch) if err != nil { return resource, err } patcher := r.Client.SamplesV1().MyDeployments(resource.Namespace) resourceName := resource.Name updated, err := patcher.Patch(ctx, resourceName, types.MergePatchType, patch, metav1.PatchOptions{}) if err != nil { r.Recorder.Eventf(existing, corev1.EventTypeWarning, "FinalizerUpdateFailed", "Failed to update finalizers for %q: %v", resourceName, err) } else { r.Recorder.Eventf(updated, corev1.EventTypeNormal, "FinalizerUpdate", "Updated %q finalizers", resource.GetName()) } return updated, err } func (r *reconcilerImpl) setFinalizerIfFinalizer(ctx context.Context, resource *v1.MyDeployment) (*v1.MyDeployment, error) { if _, ok := r.reconciler.(Finalizer); !ok { return resource, nil } finalizers := sets.NewString(resource.Finalizers...) // If this resource is not being deleted, mark the finalizer. if resource.GetDeletionTimestamp().IsZero() { finalizers.Insert(r.finalizerName) } resource.Finalizers = finalizers.List() // Synchronize the finalizers filtered by r.finalizerName. return r.updateFinalizersFiltered(ctx, resource) } func (r *reconcilerImpl) clearFinalizer(ctx context.Context, resource *v1.MyDeployment, reconcileEvent reconciler.Event) (*v1.MyDeployment, error) { if _, ok := r.reconciler.(Finalizer); !ok { return resource, nil } if resource.GetDeletionTimestamp().IsZero() { return resource, nil } finalizers := sets.NewString(resource.Finalizers...) if reconcileEvent != nil { var event *reconciler.ReconcilerEvent if reconciler.EventAs(reconcileEvent, &event) { if event.EventType == corev1.EventTypeNormal { finalizers.Delete(r.finalizerName) } } } else { finalizers.Delete(r.finalizerName) } resource.Finalizers = finalizers.List() // Synchronize the finalizers filtered by r.finalizerName. return r.updateFinalizersFiltered(ctx, resource) }
{ // Check the options function input. It should be 0 or 1. if len(options) > 1 { logger.Fatal("Up to one options struct is supported, found: ", len(options)) } // Fail fast when users inadvertently implement the other LeaderAware interface. // For the typed reconcilers, Promote shouldn't take any arguments. if _, ok := r.(reconciler.LeaderAware); ok { logger.Fatalf("%T implements the incorrect LeaderAware interface. Promote() should not take an argument as genreconciler handles the enqueuing automatically.", r) } rec := &reconcilerImpl{ LeaderAwareFuncs: reconciler.LeaderAwareFuncs{ PromoteFunc: func(bkt reconciler.Bucket, enq func(reconciler.Bucket, types.NamespacedName)) error { all, err := lister.List(labels.Everything()) if err != nil { return err } for _, elt := range all { // TODO: Consider letting users specify a filter in options. enq(bkt, types.NamespacedName{ Namespace: elt.GetNamespace(), Name: elt.GetName(), }) } return nil }, }, Client: client, Lister: lister, Recorder: recorder, reconciler: r, finalizerName: defaultFinalizerName, } for _, opts := range options { if opts.ConfigStore != nil { rec.configStore = opts.ConfigStore } if opts.FinalizerName != "" { rec.finalizerName = opts.FinalizerName } if opts.SkipStatusUpdates { rec.skipStatusUpdates = true } if opts.DemoteFunc != nil { rec.DemoteFunc = opts.DemoteFunc } } return rec }
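// Usage sketch (added for clarity; not part of the generated file): a
// hypothetical implementation of the generated Interface. ReconcileKind
// mutates only the resource's .Status and returns a nil event; the generated
// reconcilerImpl above then persists the status change, as its doc comments
// prescribe. Assumes it lives in a package with the same context, reconciler
// ("knative.dev/pkg/reconciler"), and v1 imports as the generated file.

// Reconciler is a hypothetical type holding whatever clients the business
// logic needs (none in this sketch).
type Reconciler struct{}

// Compile-time assertion that Reconciler satisfies the generated Interface.
var _ Interface = (*Reconciler)(nil)

// ReconcileKind sets status conditions and returns nil to mark success.
func (r *Reconciler) ReconcileKind(ctx context.Context, d *v1.MyDeployment) reconciler.Event {
	// ... business logic: inspect d.Spec and update d.Status here ...
	return nil // a nil or Normal event allows the reconciliation to succeed
}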
filtered_emnist_data_utils.py
# Copyright 2019, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility for filtering (via classification accuracy) the Federated EMNIST dataset."""

import csv
import functools
import os.path

import tensorflow as tf
import tensorflow_federated as tff

from gans.experiments.emnist import emnist_data_utils

BASE_URL = 'https://storage.googleapis.com/tff-experiments-public/'
CSVS_BASE_PATH = 'gans/csvs/'


@functools.lru_cache(maxsize=1)
def get_unfiltered_client_data_for_training(batch_size):
  """Returns a `tff.simulation.datasets.ClientData` of unfiltered Federated EMNIST data.

  The data returned will neither be filtered by user nor by example, so
  training can take place with all users and all examples for each user.

  Args:
    batch_size: Batch size of output dataset. If None, don't batch.

  Returns:
    A `tff.simulation.datasets.ClientData` of real images of numbers/letters.
    The data has not been filtered.
  """
  return get_filtered_client_data_for_training(None, None, batch_size)


@functools.lru_cache(maxsize=1)
def get_filtered_by_user_client_data_for_training(invert_imagery_probability,
                                                  accuracy_threshold,
                                                  batch_size,
                                                  cache_dir=None):
  """Returns a `tff.simulation.datasets.ClientData` of filtered Federated EMNIST data.

  Input data gets filtered on a per-user basis; users get selected via the
  `accuracy_threshold` criterion, and then training can take place with all
  examples from only the selected users.

  Args:
    invert_imagery_probability: The probability that a user's image data has
      pixel intensity inverted. E.g., `0p1` corresponds to 0.1, or a 10%
      probability that a user's data is flipped. Note that to save time in
      experiment execution, this is precomputed via the ./filter_users.py
      script, and the selection here controls which file to read from.
    accuracy_threshold: Indicates the classification threshold by which a user
      is included in the training population. E.g., `lt0p882` means any user
      whose data cumulatively classifies with <0.882 accuracy would be used
      for training; `gt0p939` means any user whose data cumulatively
      classifies with >0.939 accuracy would be used for training. To save time
      in experiment execution, this assignment is precomputed via the
      ./filter_users.py script, and the flag selection here is to indicate
      which file to read from.
    batch_size: Batch size of output dataset. If None, don't batch.
    cache_dir: (Optional) base directory to cache the downloaded files. If
      None, caches in Keras' default cache directory.

  Returns:
    A `tff.simulation.datasets.ClientData` of real images of numbers/letters.
    The data has been filtered by user classification accuracy as per the
    input arguments.
""" path_to_data = os.path.join(CSVS_BASE_PATH, 'inv_prob_{}'.format(invert_imagery_probability), 'filter_by_user', 'acc_{}'.format(accuracy_threshold)) try: filename = 'client_ids.csv' path_to_read_inversions_csv = tf.keras.utils.get_file( fname=filename, cache_subdir=path_to_data, cache_dir=cache_dir, origin=os.path.join(BASE_URL, path_to_data, filename)) except Exception: msg = ('A URL fetch failure was encountered when trying to retrieve ' 'filter-by-user generated csv file with invert_imagery_probability ' '`{}` and accuracy_threshold `{}`. Please run the ./filter_users.py ' 'script to generate the missing data, and use the `cache_dir` ' 'argument to this method to specify the location of the generated ' 'data csv file.'.format(invert_imagery_probability, accuracy_threshold)) raise ValueError(msg) return get_filtered_client_data_for_training(path_to_read_inversions_csv, None, batch_size) @functools.lru_cache(maxsize=1) def get_filtered_by_example_client_data_for_training(invert_imagery_probability, min_num_examples, example_class_selection, batch_size, cache_dir=None): r"""Returns `tff.simulation.datasets.ClientData` of filtered Federated EMNIST data. Input data gets filtered on a per-example basis. Any user meeting the `min_num_examples` criterion is included. The examples are limited to those that classified according to the `example_class_selection` criterion. Args: invert_imagery_probability: The probability that a user\'s image data has pixel intensity inverted. E.g., `0p1` corresponds to 0.1, or a 10% probability that a user\'s data is flipped. Note that to save time in experiment execution, this is precomputed via the ./filter_examples.py scripts, and the selection here controls which file to read from. min_num_examples: Indicates the minimum number of examples that are either correct or incorrect (as set by the `example_class_selection` argument) in a client\'s local dataset for that client to be considered as part of training sub-population. To save time in experiment execution, this assignment is precomputed via the ./filter_examples.py script, and the flag selection here is to indicate which file to read from. example_class_selection: Indicates whether to train on a client\'s correct or incorrect examples. To save time in experiment execution, this assignment is precomputed via the ./filter_examples.py script, and the flag selection here is to indicate which file to read from. batch_size: Batch size of output dataset. If None, don't batch. cache_dir: (Optional) base directory to cache the downloaded files. If None, caches in Keras' default cache directory. Returns: A `tff.simulation.datasets.ClientData` of real images of numbers/letters. The data has been filtered as per the input arguments (either not filtered, filtered by user classification accuracy, or filtered by example classification correctness). 
""" path_to_data = os.path.join(CSVS_BASE_PATH, 'inv_prob_{}'.format(invert_imagery_probability), 'filter_by_example', 'min_num_examples_{}'.format(min_num_examples), '{}'.format(example_class_selection)) try: filename = 'client_ids.csv' path_to_read_inversions_csv = tf.keras.utils.get_file( fname=filename, cache_subdir=path_to_data, cache_dir=cache_dir, origin=os.path.join(BASE_URL, path_to_data, filename)) filename = 'example_indices_map.csv' path_to_read_example_indices_csv = tf.keras.utils.get_file( fname=filename, cache_subdir=path_to_data, cache_dir=cache_dir, origin=os.path.join(BASE_URL, path_to_data, filename)) except Exception: msg = ('A URL fetch failure was encountered when trying to retrieve ' 'filter-by-example generated csv files with ' 'invert_imagery_probability `{}`, min_num_examples `{}`, and ' 'example_class_selection `{}`. Please run the ./filter_examples.py ' 'script to generate the missing data, and use the `cache_dir` ' 'argument to this method to specify the location of the generated ' 'data csv files.'.format(invert_imagery_probability, min_num_examples, example_class_selection)) raise ValueError(msg) return get_filtered_client_data_for_training( path_to_read_inversions_csv, path_to_read_example_indices_csv, batch_size) def get_filtered_client_data_for_training(path_to_read_inversions_csv, path_to_read_example_indices_csv, batch_size): """Form ClientData using paths to pixel inversion, example selection data.""" raw_client_data = emnist_data_utils.create_real_images_tff_client_data( 'train') client_ids = raw_client_data.client_ids selected_client_ids_inversion_map = None client_ids_example_indices_map = None # If filter-by-user or filter-by-example, load the csv data into maps, and # update the client IDs to just the users that will be part of training. if path_to_read_inversions_csv is not None: selected_client_ids_inversion_map, client_ids_example_indices_map = ( _get_client_ids_inversion_and_example_indices_maps( path_to_read_inversions_csv, path_to_read_example_indices_csv)) client_ids = list(selected_client_ids_inversion_map.keys()) def _get_dataset(client_id): """Retrieve/preprocess a tf.data.Dataset for a given client_id.""" raw_ds = raw_client_data.create_tf_dataset_for_client(client_id) invert_imagery = False if selected_client_ids_inversion_map:
      # If filter-by-example, do it here.
      if client_ids_example_indices_map:
        raw_ds = _filter_by_example(raw_ds, client_ids_example_indices_map,
                                    client_id)

    return emnist_data_utils.preprocess_img_dataset(
        raw_ds,
        invert_imagery=invert_imagery,
        include_label=False,
        batch_size=batch_size,
        shuffle=True,
        repeat=False)

  return tff.simulation.datasets.ClientData.from_clients_and_fn(
      client_ids, _get_dataset)


def _filter_by_example(raw_ds, client_ids_example_indices_map, client_id):
  """Form a tf.data.Dataset from the examples in the map for the client_id."""
  example_indices = client_ids_example_indices_map[client_id]
  # Because the csv stores the list as a string, we need to do a slightly
  # kludgey conversion from a string back to a list: we strip off the first
  # and last characters in the string (which are [ and ]), and then split on
  # commas as delimiters, to recover the original list of ints.
  example_indices = [int(s) for s in example_indices[1:-1].split(',')]

  # Get the elements (OrderedDicts) in the raw data which are at the indices
  # indicated by the list above.
  elements = []
  index = 0
  for element in raw_ds:
    if index in example_indices:
      elements.append(element)
    index += 1

  # Bind the elements (via a generator fn) into a new tf.data.Dataset.
  def _generator():
    for element in elements:
      yield element

  return tf.data.Dataset.from_generator(_generator, raw_ds.output_types,
                                        raw_ds.output_shapes)


def _get_client_ids_inversion_and_example_indices_maps(
    path_to_read_inversions_csv, path_to_read_example_indices_csv):
  """Returns maps (loaded from csv files) indicating the data to train on."""
  if path_to_read_inversions_csv is None:
    raise ValueError(
        'No path provided to the CSV file that stores map from client ids to '
        'image inversion data.')

  # Load (from CSV file) the specific client IDs that the GAN will train on,
  # and whether or not the images on that client are inverted.
  selected_client_ids_inversion_map = {}
  with tf.io.gfile.GFile(path_to_read_inversions_csv, 'r') as csvfile:
    csvreader = csv.reader(csvfile)
    for [key, val] in csvreader:
      selected_client_ids_inversion_map[key] = (val == 'True')

  # If specified (via CSV file), load the specific examples on each client ID
  # that the GAN will be trained on.
  client_ids_example_indices_map = None
  if path_to_read_example_indices_csv:
    client_ids_example_indices_map = {}
    with tf.io.gfile.GFile(path_to_read_example_indices_csv, 'r') as csvfile:
      csvreader = csv.reader(csvfile)
      for [key, val] in csvreader:
        client_ids_example_indices_map[key] = val

    set_1 = set(client_ids_example_indices_map.keys())
    set_2 = set(selected_client_ids_inversion_map.keys())
    symmetric_diff = set_1 ^ set_2
    if symmetric_diff:
      raise ValueError(
          'The CSV files at path_to_read_inversions_csv and '
          'path_to_read_example_indices_csv contain different keys.')

  return selected_client_ids_inversion_map, client_ids_example_indices_map
invert_imagery = selected_client_ids_inversion_map[client_id]
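# Usage sketch (added for clarity; not part of the original module). The
# argument strings below are the illustrative values from the docstrings
# above, and assume the corresponding precomputed CSVs exist under BASE_URL.
def _example_usage():
  client_data = get_filtered_by_user_client_data_for_training(
      invert_imagery_probability='0p1',  # 10% chance a user's images are inverted
      accuracy_threshold='lt0p882',      # keep users classifying with <0.882 accuracy
      batch_size=32)
  # client_data is a tff.simulation.datasets.ClientData; build a
  # tf.data.Dataset for one of the selected clients.
  return client_data.create_tf_dataset_for_client(client_data.client_ids[0])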
game.js
var socket, Codename = io('/codename', {forceNew:true}); var cardsTable = [], libraryOfImages = [], imagesOnDisplay = [], totalImages = 0; var animalImages = [], animeImages = [], cartoonImages = [], gameImages = [], additionalImages = []; var spriteRatioWidthtoHeight =1, spriteRatioHeighttoWidth=1; var Width, Height, Game_State = "Start"; var startGame, about, categories=[], steps = 0, objects = []; var credits, seriesTitle, characterName, imageSource , creditsCard = null, creditsImage = null, creditsScrollCircle= null, creditsScrollSection = null; var cardSheet, boardDisplay, listDisplay, cardBoard = []; var leftScores = 0, rightScores = 0, ScoreBoard = [], saveLeftScoreTexture = null, saveRightScoreTexture = null; var saveCardWhiteTexture = null, saveCardBlackTexture = null; var team1 = null, team2 = null, timer = null, timerSetting = "Off", timesUp; var sectionTitle, cardText, returnToStartScreen; function init() { // create a scene, that will hold all our elements such as objects, cameras and lights. var scene = new THREE.Scene(); // create a camera, which defines where we're looking at. camera = new THREE.PerspectiveCamera(50, 500/ 500, 0.1, 1000); camera.position.set(0,0,53); scene.add(camera); //scene.background = new THREE.Color( 0x1a0a3a ); scene.background = new THREE.Color( 0x000000 ); // create a render and set the size var renderer = new THREE.WebGLRenderer({ antialias: true} ); //var renderer = new THREE.WebGLRenderer({ antialias: true, preserveDrawingBuffer: true } ); < -- image saving trial renderer.setClearColor(new THREE.Color(0x000000, 0.0)); Width = window.innerWidth*0.95; Height = window.innerHeight*1.025; renderer.setSize(Width, Height); //camera.aspect = window.innerWidth/window.innerHeight; camera.aspect = Width/Height; //socket = io.connect('http://localhost:9000'); socket = io.connect('http://ec2-34-205-146-82.compute-1.amazonaws.com:9000'); //Insert Data from the Server Codename.on('CountDown', function(data) { // console.log(data.Count) if(Game_State == "Game"){ if(data.Count <= 60) { // if(data.Count <= 30){ timer.parameters.text = " " + data.Count + " "; if(scene.getObjectByName('timesUp') != null) scene.remove(timesUp); } else { timer.parameters.text = " "; if(scene.getObjectByName('timesUp') == null) scene.add(timesUp); } timer.update(); } }); // Sets the Board Card Types ( 'Team 1', 'Team 2', 'Assassin' and Null for civilians) Codename.on('Board', function(data) { if(Game_State == "Game"){ for(var x = 0; x < 24 && x<cardsTable.length; x++){ cardsTable[x].type= data.Board[x].type; console.log(x + ". 
" + cardsTable[x].type); } } else if(Game_State == "Board"){ for(var x = 0; x < 24 && x<cardBoard.length; x++){ if( data.Board[x].type == null){ cardBoard[x].material.color.setHex(0xffff00); } else if( data.Board[x].type == "Assassin"){ cardBoard[x].material.color.setHex(0x575757); } else if( data.Board[x].type == "Team 1"){ cardBoard[x].material.color.setHex(0x2260a7); } else if( data.Board[x].type == "Team 2"){ cardBoard[x].material.color.setHex(0xa73457); } } } }); // Receive information for the Group Leaders Codename.on('Info', function(data) { if(Game_State == "Board"){ for(var x = 0; x < 24 && x<cardBoard.length; x++){ if( data.Board[x].type == null){ cardBoard[x].material.color.setHex(0xffff00); } else if( data.Board[x].type == "Assassin"){ cardBoard[x].material.color.setHex(0x575757); } else if( data.Board[x].type == "Team 1"){ cardBoard[x].material.color.setHex(0x2260a7); } else if( data.Board[x].type == "Team 2"){ cardBoard[x].material.color.setHex(0xa73457); } } } }); //add the output of the renderer to the html element document.getElementById("WebGL-output").appendChild(renderer.domElement); //Keyboard Functions function onKeyDown(event) { if(event.keyCode == 32 && Game_State == "Game"){ window.open( renderer.domElement.toDataURL( 'image/png' ), 'screenshot' ); //Clear the Scene while(imagesOnDisplay.length >=1){ scene.remove(imagesOnDisplay[0]); imagesOnDisplay.shift(); } //Clear the clickable/draggable objects while(objects.length >=1){ objects.shift(); } //Clear the Score Board while(ScoreBoard.length >=1){ scene.remove(ScoreBoard[0]); ScoreBoard.shift(); } libraryOfImages = []; load_Start_Screen(); } }; document.addEventListener('keydown', onKeyDown, false); //add spotlight for the shadows var spotLight = new THREE.SpotLight(0xffffff); spotLight.position.set(0, 0, 25); spotLight.castShadow = false; spotLight.intensity =2; scene.add(spotLight); renderScene(); drag_objects(); load_Text_and_Buttons(); load_Start_Screen(); load_Animals_Images(); load_Anime_Images(); load_Cartoon_Images(); load_Game_Images(); load_Additional_Images(); //Render the Scenes function renderScene(){ try{ steps++; //Render steps //render using requestAnimationFrame requestAnimationFrame(renderScene); renderer.render(scene, camera); scene.traverse(function (e) { if(e.name == "leftSiding" && steps % 25 == 0 && Game_State != "Start"){ if(e.style == 1){ e.material.color = new THREE.Color("rgb(50,165,250)"); e.style = 2; } else if(e.style == 2){ e.material.color = new THREE.Color("rgb(23,155,220)"); e.style = 1; } } else if(e.name == "rightSiding" && steps % 25 == 0 && Game_State != "Start"){ if(e.style == 1){ e.material.color = new THREE.Color("rgb(250,93,93)"); e.style = 2; } else if(e.style == 2){ e.material.color = new THREE.Color("rgb(220,53,53)"); e.style = 1; } } }); }catch(e){} } //Make Objects Draggable - Additionally used as buttons function drag_objects(){ var dragControls = new THREE.DragControls( objects, camera, renderer.domElement ); dragControls.addEventListener( 'dragstart', function(event) { // Card Holders if (event.object.name == "cardHolder" && event.object.revealed == false){ // Civilian if(cardsTable[event.object.cardNumber].type == null){ // Yellow event.object.material.color = new THREE.Color("rgb(255,255,0)"); } // Team 1 else if(cardsTable[event.object.cardNumber].type == "Team 1"){ // Blue event.object.material.color = new THREE.Color("rgb(23,155,220)"); ScoreBoard[leftScores].material.color = new THREE.Color("rgb(23,155,220)"); leftScores ++; } // Team 2 else 
if(cardsTable[event.object.cardNumber].type == "Team 2"){ // Red event.object.material.color = new THREE.Color("rgb(220,53,53)"); ScoreBoard[ScoreBoard.length + rightScores - 8].material.color = new THREE.Color("rgb(220,53,53)"); rightScores ++; } // Assassin else if(cardsTable[event.object.cardNumber].type == "Assassin"){ // Black event.object.material.color = new THREE.Color("rgb(23,23,23)"); } event.object.revealed = true; /** if(Math.floor(Math.random()*2)==0 && leftScores < 8){ // Blue event.object.material.color = new THREE.Color("rgb(23,155,220)"); ScoreBoard[leftScores].material.color = new THREE.Color("rgb(23,155,220)"); leftScores ++; } else if( rightScores < 8){ // Red event.object.material.color = new THREE.Color("rgb(220,53,53)"); ScoreBoard[ScoreBoard.length + rightScores - 8].material.color = new THREE.Color("rgb(220,53,53)"); rightScores ++; } **/ } // Timer button else if (event.object.name == "timer"){ if(timerSetting == "Off"){ timerSetting = "On"; // console.log(timerSetting) timer.parameters.text = " 60 "; Codename.emit('Timer On'); } else if(timerSetting == "On"){ timerSetting = "Off"; // console.log(timerSetting); timer.parameters.text = "Timer"; Codename.emit('Timer Off'); } timer.update(); } // Credits button else if (event.object.name == "Credits"){ //Clear the clickable/draggable objects while(objects.length >=1){ objects.shift(); } go_to_Credit_Screen(); } // About button else if(event.object.name == "About"){ //Clear the clickable/draggable objects while(objects.length >=1){ objects.shift(); } go_to_About_Screen(); } // cardSheet button else if(event.object.name == "cardSheet"){ //Clear the clickable/draggable objects while(objects.length >=1){ objects.shift(); } go_to_Card_Screen(); } // Start Game button else if (event.object.name == "Start Game"){ //Clear the clickable/draggable objects while(objects.length >=1){ objects.shift(); } go_to_Game_Board(); } // Return else if (event.object.name == "Return"){ scene.remove(sectionTitle); scene.remove(cardText); //Clear the clickable/draggable objects while(objects.length >=1){ objects.shift(); } // Correct categories if(Game_State == "Credits"){ scene.remove(seriesTitle); scene.remove(characterName); scene.remove(imageSource); scene.remove(creditsImage); scene.remove(creditsCard); scene.remove(creditsScrollCircle); //Clear the selections objects for(var x= 0; x< creditsScrollSection.length; x++){ scene.remove(creditsScrollSection[x]); } categories[0].parameters.font= "135px Arial"; categories[0].position.y = categories[0].position.y+8; categories[0].position.x = categories[0].position.x +16.5; categories[0].update(); for(var x = 1; x < categories.length; x++){ categories[x].credits = false; categories[x].posY = categories[x].posY +7; categories[x].position.y = categories[x].posY; categories[x].parameters.font= "135px Arial"; categories[x].update(); } } else if(Game_State == "Board"){ scene.remove(boardDisplay); scene.remove(listDisplay); for(var x= 0; x< cardBoard.length; x++){ scene.remove(cardBoard[x]); } } // Return to Start Screen scene.remove(returnToStartScreen); load_Start_Screen(); } // Go to Image Source else if (event.object.name == "imageSource"){ //Opens the Url to the Source window.open(event.object.url, '_blank'); } // Animes Categories else if(event.object.name == "anime"){ // Game Settings if(Game_State == "Start"){ // Change the Text Color if(anime.includeCards == true) anime.parameters.fillStyle= "#552020"; else anime.parameters.fillStyle= "Crimson"; anime.includeCards = !anime.includeCards; } // 
Credits else if(Game_State == "Credits"){ // Change the Text Color if(anime.credits != true){ anime.parameters.fillStyle= "Crimson"; anime.credits = true; cartoon.credits = false; game.credits = false; // Change the Text Color for the now chosen Options cartoon.parameters.fillStyle= "#533200"; game.parameters.fillStyle= "#003300"; cartoon.update(); game.update(); // Update Scene // Change Selection Background color creditsScrollSection[0].material.color.setHex(0xfa3a3a); // Image - Check to see if the first image is already loaded if(animeImages[0].sprite == null){ animeImages[0].sprite = loadImagesfromText(animeImages[0].filename,"Anime",animeImages[0].backgroundColor); animeImages[0].sprite.characterName = animeImages[0].name; animeImages[0].sprite.backgroundColor = animeImages[0].backgroundColor; } creditsImage.material = animeImages[0].sprite; // Update Card if(animeImages[0].sprite.backgroundColor.trim() == "Black") creditsCard.material = create_Black_Card(); //Make the Card Background White else if(animeImages[0].sprite.backgroundColor.trim() == "White") creditsCard.material = create_White_Card(); //Setting the Series and Character Name in the Credits characterName.parameters.text = animeImages[0].name; characterName.update(); seriesTitle.parameters.text = "Series: "+animeImages[0].series; seriesTitle.update(); // Update Selections for(var x= 1; x<creditsScrollSection.length; x++){ scene.add(creditsScrollSection[x]) objects.push(creditsScrollSection[x]) creditsScrollSection[x].arrayNumber = x-1; creditsScrollSection[x].parameters.text = animeImages[x-1].series; creditsScrollSection[x].update(); } //Update Link imageSource.url = animeImages[0].source; // Reset Scroll Bar creditsScrollCircle.position.y = 5; } } anime.update(); } // Cartoons Categories else if(event.object.name == "cartoon"){ if(Game_State == "Start"){ // Change the Text Color if(cartoon.includeCards == true) cartoon.parameters.fillStyle= "#533200"; else cartoon.parameters.fillStyle= "orangered"; cartoon.includeCards = !cartoon.includeCards; } // Credits else if(Game_State == "Credits"){ // Change the Text Color if(cartoon.credits != true){ cartoon.parameters.fillStyle= "orangered"; cartoon.credits = true; anime.credits = false; game.credits = false; // Change the Text Color for the now chosen Options anime.parameters.fillStyle= "#552020"; game.parameters.fillStyle= "#003300"; anime.update(); game.update(); // Update Scene // Change Selection Background color creditsScrollSection[0].material.color.setHex(0xfa7a3a); // Image - Check to see if the first image is already loaded if(cartoonImages[0].sprite == null){ cartoonImages[0].sprite = loadImagesfromText(cartoonImages[0].filename,"Cartoon",cartoonImages[0].backgroundColor); cartoonImages[0].sprite.characterName = cartoonImages[0].name; cartoonImages[0].sprite.backgroundColor = cartoonImages[0].backgroundColor; } creditsImage.material = cartoonImages[0].sprite; // Update Card if(cartoonImages[0].sprite.backgroundColor.trim() == "Black") creditsCard.material = create_Black_Card(); //Make the Card Background White else if(cartoonImages[0].sprite.backgroundColor.trim() == "White") creditsCard.material = create_White_Card(); //Setting the Series and Character Name in the Credits characterName.parameters.text = cartoonImages[0].name; characterName.update(); seriesTitle.parameters.text = "Series: "+cartoonImages[0].series; seriesTitle.update(); // Update Selections for(var x= 1; x<creditsScrollSection.length; x++){ scene.add(creditsScrollSection[x]) 
objects.push(creditsScrollSection[x]) creditsScrollSection[x].arrayNumber = x-1; creditsScrollSection[x].parameters.text = cartoonImages[x-1].series; creditsScrollSection[x].update(); } //Update Link imageSource.url = cartoonImages[0].source; // Reset Scroll Bar creditsScrollCircle.position.y = 5; } } cartoon.update(); } // Games Categories else if(event.object.name == "game"){ if(Game_State == "Start"){ // Change the Text Color if(game.includeCards == true) game.parameters.fillStyle= "#003300"; else game.parameters.fillStyle= "Lime"; game.includeCards = !game.includeCards; } // Credits else if(Game_State == "Credits"){ // Change the Text Color if(game.credits != true){ game.parameters.fillStyle= "Lime"; game.credits = true; anime.credits = false; cartoon.credits = false; // Change the Text Color for the now chosen Options anime.parameters.fillStyle= "#552020"; cartoon.parameters.fillStyle= "#533200"; anime.update(); cartoon.update(); // Update Scene // Change Selection Background color creditsScrollSection[0].material.color.setHex(0x3a7a3a); // Image - Check to see if the first image is already loaded if(gameImages[0].sprite == null){ gameImages[0].sprite = loadImagesfromText(gameImages[0].filename,"Game",gameImages[0].backgroundColor); gameImages[0].sprite.characterName = gameImages[0].name; gameImages[0].sprite.backgroundColor = gameImages[0].backgroundColor; } creditsImage.material = gameImages[0].sprite; // Update Card if(gameImages[0].sprite.backgroundColor.trim() == "Black") creditsCard.material = create_Black_Card(); //Make the Card Background White else if(gameImages[0].sprite.backgroundColor.trim() == "White") creditsCard.material = create_White_Card(); //Setting the Series and Character Name in the Credits characterName.parameters.text = gameImages[0].name; characterName.update(); seriesTitle.parameters.text = "Series: "+gameImages[0].series; seriesTitle.update(); // Update Selections for(var x= 1; x<creditsScrollSection.length; x++){ scene.add(creditsScrollSection[x]) objects.push(creditsScrollSection[x]) creditsScrollSection[x].arrayNumber = x-1; creditsScrollSection[x].parameters.text = gameImages[x-1].series; creditsScrollSection[x].update(); } //Update Link imageSource.url = gameImages[0].source; // Reset Scroll Bar creditsScrollCircle.position.y = 5; } } game.update(); } // Credits Selection else if(event.object.name == "Selection"){ // Anime
animeImages[event.object.arrayNumber].sprite.backgroundColor = animeImages[event.object.arrayNumber].backgroundColor; } creditsImage.material = animeImages[event.object.arrayNumber].sprite; // Update Text characterName.parameters.text = animeImages[event.object.arrayNumber].name; seriesTitle.parameters.text = "Series: "+animeImages[event.object.arrayNumber].series; characterName.update(); seriesTitle.update(); // Update Card // Make the Card Background Black if(animeImages[event.object.arrayNumber].sprite.backgroundColor.trim() == "Black") creditsCard.material = create_Black_Card(); //Make the Card Background White else if(animeImages[event.object.arrayNumber].sprite.backgroundColor.trim() == "White") creditsCard.material = create_White_Card(); //Update Link imageSource.url = animeImages[event.object.arrayNumber].source; } // Cartoon else if(categories[2].credits == true){ if(cartoonImages[event.object.arrayNumber].sprite == null){ cartoonImages[event.object.arrayNumber].sprite = loadImagesfromText(cartoonImages[event.object.arrayNumber].filename,"Cartoon",cartoonImages[event.object.arrayNumber].backgroundColor); cartoonImages[event.object.arrayNumber].sprite.characterName = cartoonImages[event.object.arrayNumber].name; cartoonImages[event.object.arrayNumber].sprite.backgroundColor = cartoonImages[event.object.arrayNumber].backgroundColor; } creditsImage.material = cartoonImages[event.object.arrayNumber].sprite; // Update Text characterName.parameters.text = cartoonImages[event.object.arrayNumber].name; seriesTitle.parameters.text = "Series: "+cartoonImages[event.object.arrayNumber].series; characterName.update(); seriesTitle.update(); // Update Card // Make the Card Background Black if(cartoonImages[event.object.arrayNumber].sprite.backgroundColor.trim() == "Black") creditsCard.material = create_Black_Card(); //Make the Card Background White else if(cartoonImages[event.object.arrayNumber].sprite.backgroundColor.trim() == "White") creditsCard.material = create_White_Card(); //Update Link imageSource.url = cartoonImages[event.object.arrayNumber].source; } // Games else if(categories[3].credits == true){ if(gameImages[event.object.arrayNumber].sprite == null){ gameImages[event.object.arrayNumber].sprite = loadImagesfromText(gameImages[event.object.arrayNumber].filename,"Game",gameImages[event.object.arrayNumber].backgroundColor); gameImages[event.object.arrayNumber].sprite.characterName = gameImages[event.object.arrayNumber].name; gameImages[event.object.arrayNumber].sprite.backgroundColor = gameImages[event.object.arrayNumber].backgroundColor; } creditsImage.material = gameImages[event.object.arrayNumber].sprite; // Update Text characterName.parameters.text = gameImages[event.object.arrayNumber].name; seriesTitle.parameters.text = "Series: "+gameImages[event.object.arrayNumber].series; characterName.update(); seriesTitle.update(); // Update Card // Make the Card Background Black if(gameImages[event.object.arrayNumber].sprite.backgroundColor.trim() == "Black") creditsCard.material = create_Black_Card(); //Make the Card Background White else if(gameImages[event.object.arrayNumber].sprite.backgroundColor.trim() == "White") creditsCard.material = create_White_Card(); //Update Link imageSource.url = gameImages[event.object.arrayNumber].source; } } //console.log("lol start of drag: "); }); dragControls.addEventListener( 'drag', function(event) { if(event.object.name != "creditsScrollCircle") event.object.position.set(event.object.posX, event.object.posY, event.object.posZ); else{ // Keeps the 
Scroll bar in the same X position event.object.position.x = event.object.posX; // Limits the Height of the Scroll Bar if(event.object.position.y > 5) event.object.position.y = 5; else if(event.object.position.y < -16) event.object.position.y = -16; // Make this a negative because it's easier to work with var percentage = event.object.position.y - 5; if(categories[1].credits == true) percentage = Math.floor(-percentage/21 * (animeImages.length - 9)); else if(categories[2].credits == true) percentage = Math.floor(-percentage/21 * (cartoonImages.length - 9)); else if(categories[3].credits == true) percentage = Math.floor(-percentage/21 * (gameImages.length - 9)); for(var x= 1; x<creditsScrollSection.length; x++){ //scene.add(creditsScrollSection[x]) //objects.push(creditsScrollSection[x]) creditsScrollSection[x].arrayNumber = x-1+percentage; if(categories[1].credits == true) creditsScrollSection[x].parameters.text = animeImages[x-1+percentage].series; else if(categories[2].credits == true) creditsScrollSection[x].parameters.text = cartoonImages[x-1+percentage].series; else if(categories[3].credits == true) creditsScrollSection[x].parameters.text = gameImages[x-1+percentage].series; creditsScrollSection[x].update(); } } }); dragControls.addEventListener( 'dragend', function(event) { /** if (event.object.name == "creditsScrollCircle"){ //event.object.position.y = event.object.posY; } **/ }); //console.log(dragControls); //https://www.learnthreejs.com/drag-drop-dragcontrols-mouse/ } // Load the Start Screens function load_Start_Screen(){ // Start Game scene.add(startGame); objects.push(startGame); // Credits scene.add(credits); objects.push(credits); // About Game scene.add(about); objects.push(about) // cardSheet scene.add(cardSheet); objects.push(cardSheet) // Categories // The Categories Title scene.add(categories[0]); // The Categories for(var x = 1; x < categories.length; x++){ if(x % 5 == 1) categories[x].posX = -18; else if(x % 5 == 2) categories[x].posX = 0; else if(x % 5 == 3) categories[x].posX = 18; else if(x % 5 == 4) categories[x].posX = -9; else if(x % 5 == 0) categories[x].posX = 9; categories[x].posY = 17 - Math.floor((x-1)*5/12)*5; categories[x].position.set( categories[x].posX, categories[x].posY, categories[x].posZ); scene.add(categories[x]); objects.push(categories[x]); } // Updates the Color Selections // Anime if(anime.includeCards != true) anime.parameters.fillStyle= "#552020"; else anime.parameters.fillStyle= "Crimson"; // Cartoon if(cartoon.includeCards != true) cartoon.parameters.fillStyle= "#533200"; else cartoon.parameters.fillStyle= "orangered"; // Game if(game.includeCards != true) game.parameters.fillStyle= "#003300"; else game.parameters.fillStyle= "Lime"; anime.update(); cartoon.update(); game.update(); Game_State = "Start"; steps = 0; } // Load the Credits Screens function go_to_Credit_Screen(){ Game_State = "Credits"; scene.remove(startGame); scene.remove(credits); scene.remove(about); scene.remove(cardSheet); sectionTitle.parameters.text= "Credits:"; sectionTitle.parameters.fillStyle= "Gold"; sectionTitle.update(); scene.add(sectionTitle); scene.add(seriesTitle); scene.add(characterName); // Image - Check to see if the first image is already loaded if(animeImages[0].sprite == null){ animeImages[0].sprite = loadImagesfromText(animeImages[0].filename,"Anime",animeImages[0].backgroundColor); animeImages[0].sprite.characterName = animeImages[0].name; animeImages[0].sprite.backgroundColor = animeImages[0].backgroundColor; } // For the Credits Images if(creditsImage 
== null) creditsImage = new THREE.Sprite(); creditsImage.material = animeImages[0].sprite; creditsImage.position.set(3,-1,-2); //xyz //creditsImage.scale.set(6,8,1); < - Cards Scales creditsImage.scale.set(9,12,1); scene.add(creditsImage); if(creditsCard == null) creditsCard = new THREE.Sprite(); if(animeImages[0].sprite.backgroundColor.trim() == "Black") creditsCard.material = create_Black_Card(); //Make the Card Background White else if(animeImages[0].sprite.backgroundColor.trim() == "White") creditsCard.material = create_White_Card(); creditsCard.posX =3; creditsCard.posY = -3; creditsCard.posZ = -2.1; creditsCard.position.set(creditsCard.posX, creditsCard.posY, creditsCard.posZ); //xyz //creditsCard.scale.set(7,9,1); <- Cards Scales creditsCard.scale.set(10.5,13.5,1); scene.add(creditsCard); //Setting the Series and Character Name in the Credits characterName.parameters.text = animeImages[0].name; characterName.update(); seriesTitle.parameters.text = "Series: "+animeImages[0].series; seriesTitle.update(); //Set the Source Link imageSource.url = animeImages[0].source; scene.add(imageSource); objects.push(imageSource); // For the Scroll Bars Section //creditsScrollCircle= null, creditsScrollSection = null; if(creditsScrollCircle == null){ var geometry = new THREE.PlaneBufferGeometry (1, 2,0); var material = new THREE.MeshBasicMaterial( { color: 0x3a3a3a } ); creditsScrollCircle = new THREE.Mesh( geometry, material ); creditsScrollCircle.position.set(-8, 5, -2); creditsScrollCircle.posX = -8; creditsScrollCircle.posY = 5; creditsScrollCircle.posZ = -2; creditsScrollCircle.name = "creditsScrollCircle"; } objects.push(creditsScrollCircle) scene.add(creditsScrollCircle) if(creditsScrollSection == null){ creditsScrollSection = []; // Selection Background var geometry = new THREE.PlaneBufferGeometry (16, 25,0); var material = new THREE.MeshBasicMaterial( { color: 0xfa3a3a } ); var selectionBackground = new THREE.Mesh( geometry, material ); selectionBackground.position.set(-15, -6, -3); selectionBackground.material.transparent = true; selectionBackground.material.opacity = 0.3; selectionBackground.name = "selectionBackground"; creditsScrollSection.push(selectionBackground); // First Selection // Start Game var firstSelection = credits_Selection_Creation(0); creditsScrollSection.push(firstSelection); // Second Selection var secondSelection = credits_Selection_Creation(1); creditsScrollSection.push(secondSelection); // Third Selection var thirdSelection = credits_Selection_Creation(2); creditsScrollSection.push(thirdSelection); // Fourth Selection var fourthSelection = credits_Selection_Creation(3); creditsScrollSection.push(fourthSelection); // Fifth Selection var fifthSelection = credits_Selection_Creation(4); creditsScrollSection.push(fifthSelection); // Sixth Selection var sixthSelection = credits_Selection_Creation(5); creditsScrollSection.push(sixthSelection); // Seventh Selection var seventhSelection = credits_Selection_Creation(6); creditsScrollSection.push(seventhSelection); // Eigth Selection var eigthSelection = credits_Selection_Creation(7); creditsScrollSection.push(eigthSelection); // Nineth Selection var ninethSelection = credits_Selection_Creation(8); creditsScrollSection.push(ninethSelection); } creditsScrollSection[0].material.color.setHex(0xfa3a3a); scene.add(creditsScrollSection[0]); for(var x= 1; x<creditsScrollSection.length; x++){ scene.add(creditsScrollSection[x]) objects.push(creditsScrollSection[x]) creditsScrollSection[x].arrayNumber = x-1; 
		creditsScrollSection[x].parameters.text = animeImages[x-1].series;
		creditsScrollSection[x].update();
	}

	// categories
	categories[0].parameters.font= "90px Arial";
	categories[0].position.y = categories[0].position.y -8;
	categories[0].position.x = categories[0].position.x -16.5;
	categories[0].update();
	for(var x = 1; x < categories.length; x++){
		categories[x].credits = false;
		categories[x].posY = categories[x].posY -7;
		categories[x].position.y = categories[x].posY;
		categories[x].parameters.font= "105px Arial";
		categories[x].update();
		objects.push(categories[x]);
	}
	anime.parameters.fillStyle= "Crimson";
	anime.credits = true;
	anime.update();
	cartoon.parameters.fillStyle= "#533200";
	game.parameters.fillStyle= "#003300";
	cartoon.update();
	game.update();

	// Return to Start Screen
	scene.add(returnToStartScreen);
	objects.push(returnToStartScreen);
}

// Load the About Screen
function go_to_About_Screen(){
	scene.remove(startGame);
	scene.remove(credits);
	scene.remove(about);
	scene.remove(cardSheet);

	sectionTitle.parameters.text= "About";
	sectionTitle.parameters.fillStyle= "#4169E1";
	sectionTitle.update();
	scene.add(sectionTitle);
	scene.add(cardText);

	// categories
	for(var x = 0; x < categories.length; x++)
		scene.remove(categories[x]);

	// Return to Start Screen
	scene.add(returnToStartScreen);
	objects.push(returnToStartScreen);
}

// Load the Card Screen
function go_to_Card_Screen(){
	Game_State = "Board";
	scene.remove(startGame);
	scene.remove(credits);
	scene.remove(about);
	scene.remove(cardSheet);

	// Add the Scene Title
	sectionTitle.parameters.text= "Board";
	sectionTitle.parameters.fillStyle= "#afafaf";
	sectionTitle.update();
	scene.add(sectionTitle);

	// Input Board Display Button
	scene.add(boardDisplay);
	objects.push(boardDisplay);

	// Input List Display Button
	scene.add(listDisplay);
	objects.push(listDisplay);

	// Categories
	for(var x = 0; x < categories.length; x++)
		scene.remove(categories[x]);

	if( cardBoard.length == 0){
		// Add the Squares
		for( var x = 0; x <24; x++){
			var cards = create_Square();
			cards.position.set(-18+(x%6)*7.5, 6 - Math.floor(x/6)*6.75, -3);
			cardBoard.push(cards);
			scene.add(cardBoard[cardBoard.length-1]);
		}
	}
	for( var x = 0; x <24; x++){
		scene.add(cardBoard[x]);
	}

	// Get the Board info
	Codename.emit('Get Board Info');

	// Return to Start Screen
	scene.add(returnToStartScreen);
	objects.push(returnToStartScreen);
}

// Go to the Game Board
function go_to_Game_Board(){
	scene.remove(startGame);
	scene.remove(credits);
	scene.remove(about);
	scene.remove(cardSheet);

	// categories
	for(var x = 0; x < categories.length; x++)
		scene.remove(categories[x]);

	Game_State = "Game";
	leftScores = 0, rightScores = 0;
	cardsTable = [];
	for(var x = 0; x < 24; x++){
		var card = {
			image: null,
			text : null,
			card : null,
			type: null
		}
		cardsTable.push(card);
	}
	fillLibraryOfImages();
	// Lazy..
//var data = {size : 24, cards: cardsTable}; //Codename.emit('Start Game', data); //prompt(); < ------------------------------- Text typing trial } // Load Animals Images function load_Animals_Images(){ //Loading Anime Images from the File animeImages=[]; jQuery.get("Images/Animals/Animals.txt", undefined, function(data) { //Prints the full data //console.log(data); var dataLength = data.split("\n").length; //Printing out the info for(var x = 8; x < dataLength-4; x+=6){ //console.log("Series: "+data.split("\n")[x]); //console.log("Character Name: "+data.split("\n")[x+1]); //console.log("Filename: "+data.split("\n")[x+2]); //console.log("Background Color: "+data.split("\n")[x+3]); //console.log("Image Source: "+data.split("\n")[x+4]); //console.log(" "); var animals = { series : data.split("\n")[x], name : data.split("\n")[x+1], filename: data.split("\n")[x+2], backgroundColor: data.split("\n")[x+3], sprite: null, source: data.split("\n")[x+4] } animalImages.push(animals); } }, "html").done(function() { //console.log("second success"); }).fail(function(jqXHR, textStatus) { console.log("failed"); }).always(function() { console.log("Animals Loaded - "+animalImages.length+" images"); }); } // Load Anime Images function load_Anime_Images(){ //Loading Anime Images from the File animeImages=[]; jQuery.get("Images/Anime/Anime.txt", undefined, function(data) { //Prints the full data //console.log(data); var dataLength = data.split("\n").length; //Printing out the info for(var x = 8; x < dataLength-4; x+=6){ //console.log("Series: "+data.split("\n")[x]); //console.log("Character Name: "+data.split("\n")[x+1]); //console.log("Filename: "+data.split("\n")[x+2]); //console.log("Background Color: "+data.split("\n")[x+3]); //console.log("Image Source: "+data.split("\n")[x+4]); //console.log(" "); var anime = { series : data.split("\n")[x], name : data.split("\n")[x+1], filename: data.split("\n")[x+2], backgroundColor: data.split("\n")[x+3], sprite: null, source: data.split("\n")[x+4] } animeImages.push(anime); } }, "html").done(function() { //console.log("second success"); }).fail(function(jqXHR, textStatus) { console.log("failed"); }).always(function() { console.log("Anime Loaded - "+animeImages.length+" images"); }); } // Load Cartoon Images function load_Cartoon_Images(){ //Loading Cartoon Images from the File cartoonImages=[]; jQuery.get("Images/Cartoon/Cartoon.txt", undefined, function(data) { //Prints the full data //console.log(data); var dataLength = data.split("\n").length; //Printing out the info for(var x = 8; x < dataLength-4; x+=6){ var cartoon = { series : data.split("\n")[x], name : data.split("\n")[x+1], filename: data.split("\n")[x+2], backgroundColor: data.split("\n")[x+3], sprite: null, source: data.split("\n")[x+4] } cartoonImages.push(cartoon); } }, "html").done(function() { //console.log("second success"); }).fail(function(jqXHR, textStatus) { console.log("failed"); }).always(function() { console.log("Cartoon Loaded - "+cartoonImages.length+" images"); }); } // Load Game Images function load_Game_Images(){ //Loading Game Images from the File gameImages=[]; jQuery.get("Images/Game/Game.txt", undefined, function(data) { //Prints the full data //console.log(data); var dataLength = data.split("\n").length; //Printing out the info for(var x = 8; x < dataLength-4; x+=6){ var game = { series : data.split("\n")[x], name : data.split("\n")[x+1], filename: data.split("\n")[x+2], backgroundColor: data.split("\n")[x+3], sprite: null, source: data.split("\n")[x+4] } gameImages.push(game); } }, 
"html").done(function() { //console.log("second success"); }).fail(function(jqXHR, textStatus) { console.log("failed"); }).always(function() { console.log("Games Loaded - "+gameImages.length+" images"); }); } // Load Additional Images function load_Additional_Images(){ //Loading Additional Images from the File additionalImages=[]; var loader = new THREE.TextureLoader(); loader.crossOrigin = true; //Left Siding var Texture = loader.load( 'Images/Additional Images/leftSide.png'); Texture.minFilter = THREE.LinearFilter; var Imagecover = new THREE.SpriteMaterial( { map: Texture, color: 0xffffff } ); Imagecover.x = 0; Imagecover.y = 0; additionalImages.push(Imagecover); //Right Siding Texture = loader.load( 'Images/Additional Images/rightSide.png'); Texture.minFilter = THREE.LinearFilter; Imagecover = new THREE.SpriteMaterial( { map: Texture, color: 0xffffff } ); Imagecover.x = 0; Imagecover.y = 0; additionalImages.push(Imagecover); //Time Up's Upload Texture = loader.load( "Images/Additional Images/Time's Up.png"); Texture.minFilter = THREE.LinearFilter timesUp = new THREE.Sprite(); timesUp.material = new THREE.SpriteMaterial( { map: Texture, color: 0xffffff } ); //timesUp.material.color = new THREE.Color("rgb(255,255,255)"); timesUp.position.set(0, -4, 1.5); //xyz timesUp.scale.set(38,10,1); timesUp.name = "timesUp"; } //Load and a preset the images for use function loadImages(pictureName, characterName, type, cardBackgroundColor, source){ //Sprites var loader = new THREE.TextureLoader(); loader.crossOrigin = true; var Texture = loader.load( 'Images/'+type+'/'+pictureName); Texture.minFilter = THREE.LinearFilter; var Imagecover = new THREE.SpriteMaterial( { map: Texture, color: 0xffffff } ); Imagecover.mediaType = type; Imagecover.source = source; Imagecover.revealed = false; Imagecover.team = null; Imagecover.characterName = characterName; console.log(Imagecover.characterName ); Imagecover.cardBackgroundColor = cardBackgroundColor; Imagecover.x = 0; Imagecover.y = 0; return Imagecover; } //Load and a preset the images for use function loadImagesfromText(pictureName, type, cardBackgroundColor){ //Sprites var loader = new THREE.TextureLoader(); loader.crossOrigin = true; var Texture = loader.load( 'Images/'+type+'/'+pictureName); Texture.minFilter = THREE.LinearFilter; var Imagecover = new THREE.SpriteMaterial( { map: Texture, color: 0xffffff } ); Imagecover.revealed = false; Imagecover.team = null; Imagecover.cardBackgroundColor = cardBackgroundColor; Imagecover.x = 0; Imagecover.y = 0; return Imagecover; } // Fills the Library of Cards for the Chosen Categories function fillLibraryOfImages(){ // I have an additional section size here because at times a player may not want a section... 
    // in other words, setting it to zero
    var dataArray = [];
    // Animals
    animalSize = 0;
    if(animals.includeCards)
        animalSize = animalImages.length;
    // Animes
    animeSize = 0;
    if(anime.includeCards)
        animeSize = animeImages.length;
    // Cartoons
    cartoonSize = 0;
    if(cartoon.includeCards)
        cartoonSize = cartoonImages.length;
    // Games
    gameSize = 0;
    if(game.includeCards)
        gameSize = gameImages.length;
    // First get a tally of all applicable Images
    var totalImages = animalSize + animeSize + cartoonSize + gameSize;
    var listOfRandomImages = [];
    if(totalImages >= 24){
        if(totalImages > 24){
            //Randomly Select Characters
            while(listOfRandomImages.length <= 23){
                var arrayNumber = Math.floor(Math.random()*totalImages);
                var uniqueNumber = true;
                for(var x=0; x<listOfRandomImages.length && uniqueNumber != false; x++){
                    if( listOfRandomImages[x] == arrayNumber)
                        uniqueNumber = false;
                }
                //console.log("Results: "+uniqueNumber+" arrayNumber = "+arrayNumber);
                if(uniqueNumber == true){
                    //console.log("Results: "+uniqueNumber+" arrayNumber = "+arrayNumber);
                    listOfRandomImages.push(arrayNumber);
                }
            }
            //Load the Images from the various sections
            while(listOfRandomImages.length >= 1){
                // First check if it's under the animals section, and if it is, add the animal
                if(listOfRandomImages[0] < animalSize){
                    var x = listOfRandomImages[0];
                    if(animalImages[x].sprite == null){
                        animalImages[x].sprite = loadImagesfromText(animalImages[x].filename,"Animals",animalImages[x].backgroundColor);
                        animalImages[x].sprite.characterName = animalImages[x].name;
                        animalImages[x].sprite.series = animalImages[x].series;
                        animalImages[x].sprite.backgroundColor = animalImages[x].backgroundColor;
                    }
                    var a = {
                        name: animalImages[x].name,
                        series: animalImages[x].series,
                        type: null
                    };
                    dataArray.push(a);
                    libraryOfImages.push(animalImages[x].sprite);
                    listOfRandomImages.shift();
                }
                // Secondly check if it's under anime, and if it is, add the anime
                else if((listOfRandomImages[0]-animalSize) < animeSize){
                    var x = listOfRandomImages[0]-animalSize;
                    if(animeImages[x].sprite == null){
                        animeImages[x].sprite = loadImagesfromText(animeImages[x].filename,"Anime",animeImages[x].backgroundColor);
                        animeImages[x].sprite.characterName = animeImages[x].name;
                        animeImages[x].sprite.series = animeImages[x].series;
                        animeImages[x].sprite.backgroundColor = animeImages[x].backgroundColor;
                    }
                    var a = {
                        name: animeImages[x].name,
                        series: animeImages[x].series,
                        type: null
                    };
                    dataArray.push(a);
                    libraryOfImages.push(animeImages[x].sprite);
                    listOfRandomImages.shift();
                }
                // Thirdly check if it's under cartoon, and if it is, add the cartoon
                else if((listOfRandomImages[0]-animalSize-animeSize) < cartoonSize){
                    var x = listOfRandomImages[0]-animalSize-animeSize;
                    if(cartoonImages[x].sprite == null){
                        cartoonImages[x].sprite = loadImagesfromText(cartoonImages[x].filename,"Cartoon",cartoonImages[x].backgroundColor);
                        cartoonImages[x].sprite.characterName = cartoonImages[x].name;
                        cartoonImages[x].sprite.backgroundColor = cartoonImages[x].backgroundColor;
                    }
                    var c = {
                        name: cartoonImages[x].name,
                        series: cartoonImages[x].series,
                        type: null
                    };
                    dataArray.push(c);
                    libraryOfImages.push(cartoonImages[x].sprite);
                    listOfRandomImages.shift();
                }
                // Lastly check if it's under game, and if it is, add the game
                else if((listOfRandomImages[0]-animalSize-animeSize-cartoonSize) < gameSize){
                    var x = listOfRandomImages[0]-animalSize-animeSize-cartoonSize;
                    if(gameImages[x].sprite == null){
                        gameImages[x].sprite = loadImagesfromText(gameImages[x].filename,"Game",gameImages[x].backgroundColor);
                        gameImages[x].sprite.characterName = gameImages[x].name;
                        gameImages[x].sprite.backgroundColor = gameImages[x].backgroundColor;
                    }
                    var g = {
                        name: gameImages[x].name,
                        series: gameImages[x].series,
                        type: null
                    };
                    dataArray.push(g);
                    libraryOfImages.push(gameImages[x].sprite);
                    listOfRandomImages.shift();
                }
            }
        }
        else{
            //Upload everything
        }
        displayPlaceHolders();
        //var data = {cards: libraryOfImages};
        var data = {cards: dataArray};
        console.log(data);
        Codename.emit('Start Game', data);
    }
    else{
        console.log("Not enough images.... sorry");
    }
}

//Displays the Cards on the Table from the Library
function displayPlaceHolders(){
    var initialHeight = 11.75;
    for(var x = 0; x<libraryOfImages.length && x < 24; x++){
        //Set the Image
        var tempScene = new THREE.Sprite();
        tempScene.material = libraryOfImages[x];
        tempScene.position.set((x%6)*8-20,-10*Math.floor(x/6)+initialHeight,-2); //xyz
        tempScene.scale.set(6,8,1);
        imagesOnDisplay.push(tempScene);
        scene.add(imagesOnDisplay[imagesOnDisplay.length-1]);
        cardsTable[x].image = imagesOnDisplay[imagesOnDisplay.length-1];
        //Add the Card
        var cardHolder = new THREE.Sprite();
        //Set the Card Background Color
        //Make the Card Background Black
        if(tempScene.material.backgroundColor.trim() == "Black")
            cardHolder.material = create_Black_Card();
        //Make the Card Background White
        else if(tempScene.material.backgroundColor.trim() == "White")
            cardHolder.material = create_White_Card();
        cardHolder.posX =(x%6)*8-20;
        cardHolder.posY = -10*Math.floor(x/6)+initialHeight-1;
        cardHolder.posZ = -2.1;
        cardHolder.position.set(cardHolder.posX, cardHolder.posY, cardHolder.posZ); //xyz
        cardHolder.scale.set(7,9,1);
        cardHolder.name = "cardHolder";
        cardHolder.revealed = false;
        cardHolder.cardNumber = x;
        imagesOnDisplay.push(cardHolder);
        scene.add(imagesOnDisplay[imagesOnDisplay.length-1]);
        objects.push(imagesOnDisplay[imagesOnDisplay.length-1]);
        cardsTable[x].card = imagesOnDisplay[imagesOnDisplay.length-1];
        //Add the Character's Name
        //console.log(libraryOfImages[x].characterName);
        var text = text_creation(libraryOfImages[x].characterName,0,2,0.75);
        var fontSize = (text.parameters.text.length-8);
        //console.log(text.parameters.text+ " - "+fontSize);
        if(fontSize <= 0)
            text.parameters.font= "115px Arial";
        else{
            text.parameters.lineHeight=0.75 - fontSize*0.025;
            fontSize = 115 - (fontSize*4);
            text.parameters.font= fontSize+"px Arial";
        }
        //text.parameters.font= "70px Arial";
        text.parameters.fillStyle= "Yellow";
        text.position.set((x%6)*8-20,-9.97*Math.floor(x/6)+initialHeight-4.58, -1.9);
        text.scale.set(6,1.25,1);
        text.update();
        imagesOnDisplay.push(text);
        scene.add(imagesOnDisplay[imagesOnDisplay.length-1]);
        cardsTable[x].text = imagesOnDisplay[imagesOnDisplay.length-1];
    }
    //Left Siding
    var leftSiding = new THREE.Sprite();
    leftSiding.material = additionalImages[0];
    leftSiding.material.color = new THREE.Color("rgb(23,155,220)");
    leftSiding.position.set(-16.25,-16.5,-2.5); //xyz
    leftSiding.scale.set(20,20,1);
    leftSiding.name = "leftSiding";
    leftSiding.style = 1;
    imagesOnDisplay.push(leftSiding);
    scene.add(imagesOnDisplay[imagesOnDisplay.length-1]);
    //Right Siding
    var rightSiding = new THREE.Sprite();
    rightSiding.material = additionalImages[1];
    rightSiding.material.color = new THREE.Color("rgb(220,53,53)");
    rightSiding.position.set(16.25,-16.5,-2.5); //xyz
    rightSiding.scale.set(20,20,1);
    rightSiding.name = "rightSiding";
    rightSiding.style = 1;
    imagesOnDisplay.push(rightSiding);
    scene.add(imagesOnDisplay[imagesOnDisplay.length-1]);
    //Left Sides Scoring
    //for(var x = 0; x < 8; x++){ Gonna make it always the Left side for now
    for(var x = 0; x < 9; x++){
        var score = new
            THREE.Sprite();
        score.material = create_Left_Score();
        score.material.color = new THREE.Color("rgb(255,255,255)");
        // score.position.set(-22.5+x*2.75, initialHeight + 6.5,-2);
        // For the temp fix Ima make it start a little earlier
        score.position.set(-23.5+x*2.5, initialHeight + 6.5,-2);
        score.scale.set(1.75, 3.5,1);
        score.name = "Left Score "+x;
        ScoreBoard.push(score);
        scene.add(ScoreBoard[ScoreBoard.length-1]);
    }
    //Right Sides Scoring
    for(var x = 0; x < 8; x++){
        var score = new THREE.Sprite();
        score.material = create_Right_Score();
        score.material.color = new THREE.Color("rgb(255,255,255)");
        score.position.set(22.5-x*2.75, initialHeight + 6.5,-2); //xyz
        score.scale.set(1.75, 3.5,1);
        score.name = "Right Score "+(7-x);
        score.style = 1;
        ScoreBoard.push(score);
        scene.add(ScoreBoard[ScoreBoard.length-1]);
    }
    // Team 1 Name
    if(team1 == null){
        team1 = text_creation("Team Aqua",1,3,0.75);
        team1.parameters.font= "125px Arial";
        team1.parameters.fillStyle= "#179ADC"; // rgb(23,155,220)
        team1.position.set(-19, 24, -2.2);
        team1.scale.set(15, 7,1);
        team1.update();
    }
    imagesOnDisplay.push(team1);
    scene.add(imagesOnDisplay[imagesOnDisplay.length-1]);
    // Team 2 Name
    if(team2 == null){
        team2 = text_creation("Team Magma",1,3,0.75);
        team2.parameters.font= "125px Arial";
        team2.parameters.fillStyle= "#DC3535"; // rgb(220,53,53)
        team2.position.set(17, 24, -2.2);
        team2.scale.set(15, 7,1);
        team2.update();
    }
    imagesOnDisplay.push(team2);
    scene.add(imagesOnDisplay[imagesOnDisplay.length-1]);
    // Timer
    if(timer == null){
        timer = text_creation("Timer",0,2,0.75);
        timer.parameters.font= "135px Arial";
        timer.name = "timer";
        timer.parameters.fillStyle= "#ffffff"; // rgb(255,255,255)
        timer.posX = 0;
        timer.posY = initialHeight + 6.75;
        timer.posZ = -2.2;
        timer.position.set( timer.posX, timer.posY, timer.posZ);
        timer.scale.set(5, 3,1);
        timer.update();
    }
    imagesOnDisplay.push(timer);
    scene.add(imagesOnDisplay[imagesOnDisplay.length-1]);
    objects.push(imagesOnDisplay[imagesOnDisplay.length-1]);
    //Start the steps
    steps = 0;
}

// Load Text
function load_Text_and_Buttons(){
    // Start Game
    startGame = text_creation( "Start Game", 0, 3, 0.8);
    startGame.parameters.font= "135px Arial";
    startGame.parameters.fillStyle= "White";
    startGame.posX = 0;
    startGame.posY = -10;
    startGame.posZ = -1.9;
    startGame.position.set( startGame.posX, startGame.posY, startGame.posZ);
    startGame.scale.set(23,5,1);
    startGame.name = "Start Game";
    startGame.update();
    // Credits ----------------------
    credits = text_creation( "Credits", 0, 3, 0.8);
    credits.parameters.font= "135px Arial";
    credits.parameters.fillStyle= "Gold";
    credits.posX = 19;
    credits.posY = -22;
    credits.posZ= -2;
    credits.position.set( credits.posX, credits.posY, credits.posZ);
    credits.scale.set(14,3,1);
    credits.name = "Credits";
    credits.update();
    //Series Title
    seriesTitle = text_creation( "Series: abcdefghijklmnopqrstuvwxyz", 0, 4, 0.68);
    seriesTitle.parameters.font= "105px Arial";
    seriesTitle.parameters.fillStyle= "White";
    seriesTitle.parameters.align= "left";
    seriesTitle.posX = 5;
    seriesTitle.posY = -16;
    seriesTitle.posZ= -2;
    seriesTitle.position.set( seriesTitle.posX, seriesTitle.posY, seriesTitle.posZ);
    seriesTitle.scale.set(24,3.5,1);
    seriesTitle.name = "seriesTitle";
    seriesTitle.update();
    //Character Name
    characterName = text_creation( "abcdefghijklmnopqrstuvwxyz", 0, 4, 0.68);
    characterName.parameters.font= "120px Arial";
    characterName.parameters.fillStyle= "White";
    characterName.posX = 3;
    characterName.posY = -12;
    characterName.posZ= -2;
    characterName.position.set( characterName.posX,
        characterName.posY, characterName.posZ);
    characterName.scale.set(24,3.5,1);
    characterName.name = "characterName";
    characterName.update();
    //Image Source
    imageSource = text_creation( "Link to Image Source", 0, 4, 0.68);
    imageSource.parameters.font= "100px Arial";
    imageSource.parameters.fillStyle= "palegoldenrod";
    imageSource.posX = 19;
    imageSource.posY = -22;
    imageSource.posZ= -2;
    imageSource.position.set( imageSource.posX, imageSource.posY, imageSource.posZ);
    imageSource.scale.set(24,3.5,1);
    imageSource.name = "imageSource";
    imageSource.url = null;
    imageSource.update();
    // About
    about = text_creation( "About", 0, 3, 0.8);
    about.parameters.font= "135px Arial";
    about.parameters.fillStyle= "#4169E1";
    about.posX = -19;
    about.posY = -22;
    about.posZ= -2;
    about.position.set( about.posX, about.posY, about.posZ);
    about.scale.set(14,3,1);
    about.name = "About";
    about.update();
    // Section Title
    sectionTitle = text_creation( "Credits:", 0, 3, 0.8);
    sectionTitle.parameters.font= "135px Arial";
    sectionTitle.parameters.fillStyle= "Gold";
    sectionTitle.position.set(-19.5, 22, -1.9);
    sectionTitle.scale.set(23,5,1);
    sectionTitle.name = "sectionTitle";
    sectionTitle.update();
    // Card
    // Card Text
    cardText = text_creation( "...", 0, 3, 0.8);
    cardText.parameters.font= "135px Arial";
    cardText.parameters.fillStyle= "White";
    cardText.position.set(0, -10, -1.9);
    cardText.scale.set(23,5,1);
    cardText.name = "Card Text";
    cardText.update();
    // Return to Start Screen
    returnToStartScreen = text_creation( "Return to Start Screen", 0, 3, 0.8);
    returnToStartScreen.parameters.font= "115px Arial";
    returnToStartScreen.parameters.fillStyle= "White";
    returnToStartScreen.posX = -19;
    returnToStartScreen.posY = -22;
    returnToStartScreen.posZ= -2;
    returnToStartScreen.position.set( returnToStartScreen.posX, returnToStartScreen.posY, returnToStartScreen.posZ);
    returnToStartScreen.scale.set(14,3,1);
    returnToStartScreen.name = "Return";
    returnToStartScreen.update();
    //Fills categories
    categories = [];
    // Categories Title
    Categories = text_creation( "Categories:", 0, 3, 0.67);
    Categories.parameters.font= "135px Arial";
    Categories.parameters.fillStyle= "White";
    Categories.position.set( 0, 22, -2);
    Categories.scale.set(23,5,1);
    Categories.name = "Categories";
    Categories.update();
    categories.push(Categories);
    // Animals
    animals = text_creation( "Animals", 0, 3, 0.7);
    animals.parameters.font= "135px Arial";
    animals.parameters.fillStyle= "#A070F0";
    animals.posX = -9;
    animals.posY = 13;
    animals.posZ = -2;
    animals.position.set( animals.posX, animals.posY, animals.posZ);
    animals.scale.set(14,3,1);
    animals.name = "animals";
    animals.includeCards = true;
    animals.credits = true;
    animals.update();
    categories.push(animals);
    // Anime
    anime = text_creation( "Animes", 0, 3, 0.7);
    anime.parameters.font= "135px Arial";
    anime.parameters.fillStyle= "Crimson";
    anime.posX = -18;
    anime.posY = 17;
    anime.posZ = -2;
    anime.position.set( anime.posX, anime.posY, anime.posZ);
    anime.scale.set(14,3,1);
    anime.name = "anime";
    anime.includeCards = true;
    anime.credits = true;
    anime.update();
    categories.push(anime);
    // Cartoon
    cartoon = text_creation( "Cartoons", 0, 3, 0.7);
    cartoon.parameters.font= "135px Arial";
    cartoon.parameters.fillStyle= "orangered";
    cartoon.posX = 0;
    cartoon.posY = 17;
    cartoon.posZ = -2;
    cartoon.position.set( cartoon.posX, cartoon.posY, cartoon.posZ);
    cartoon.scale.set(14,3,1);
    cartoon.name = "cartoon";
    cartoon.includeCards = true;
    cartoon.credits = true;
    cartoon.update();
    categories.push(cartoon);
    // Games
    game = text_creation( "Games", 0, 3, 0.7);
    game.parameters.font= "135px Arial";
    game.parameters.fillStyle= "Lime";
    game.posX = 18;
    game.posY = 17;
    game.posZ = -2;
    game.position.set( game.posX, game.posY, game.posZ);
    game.scale.set(14,3,1);
    game.name = "game";
    game.includeCards = true;
    game.credits = true;
    game.update();
    categories.push(game);
    // cardSheet
    cardSheet = text_creation( "Card Sheet", 0, 3, 0.8);
    cardSheet.parameters.font= "135px Arial";
    cardSheet.parameters.fillStyle= "#3f3f3f";
    cardSheet.posX = 0;
    cardSheet.posY = -22;
    cardSheet.posZ= -2;
    cardSheet.position.set( cardSheet.posX, cardSheet.posY, cardSheet.posZ);
    cardSheet.scale.set(14,3,1);
    cardSheet.name = "cardSheet";
    cardSheet.update();
    // Board Display
    boardDisplay = text_creation( "Board Display", 0, 3, 0.8);
    boardDisplay.parameters.font= "135px Arial";
    boardDisplay.parameters.fillStyle= "#3f3f3f";
    boardDisplay.posX = -17;
    boardDisplay.posY = 15;
    boardDisplay.posZ= -2;
    boardDisplay.position.set( boardDisplay.posX, boardDisplay.posY, boardDisplay.posZ);
    boardDisplay.scale.set(14,3,1);
    boardDisplay.name = "boardDisplay";
    boardDisplay.update();
    // List Display
    listDisplay = text_creation( "List Display", 0, 3, 0.8);
    listDisplay.parameters.font= "135px Arial";
    listDisplay.parameters.fillStyle= "#3f3f3f";
    listDisplay.posX = -2;
    listDisplay.posY = 15;
    listDisplay.posZ= -2;
    listDisplay.position.set( listDisplay.posX, listDisplay.posY, listDisplay.posZ);
    listDisplay.scale.set(14,3,1);
    listDisplay.name = "listDisplay";
    listDisplay.update();
}

// Setting the cards
function divide_cards_into_teams(){
    //Help from http://www.color-blindness.com/coblis-color-blindness-simulator/
    // "Blue" - Team Aqua
    // "Red" - Team Magma
    // "Dark Grey" - Civilian
    // "Assassin" -
    var cardsArray= ["Blue","Red","Dark Grey","Assassin" ];
    console.log("Cards Array Length : "+cardsArray.length);
    //cardsArray.push
}

// Generate Unique card with a White Background
function create_White_Card(){
    //card;
    if(saveCardWhiteTexture == null){
        var loader = new THREE.TextureLoader();
        loader.crossOrigin = true;
        saveCardWhiteTexture = loader.load( 'Images/Additional Images/whiteBackground.png' );
        saveCardWhiteTexture.minFilter = THREE.LinearFilter;
    }
    var Cards = new THREE.SpriteMaterial( { map: saveCardWhiteTexture, color: 0xffffff } );
    return Cards;
}

// Generate Unique card with a Black Background
function create_Black_Card(){
    //card;
    if(saveCardBlackTexture == null){
        var loader = new THREE.TextureLoader();
        loader.crossOrigin = true;
        saveCardBlackTexture = loader.load( 'Images/Additional Images/blackBackground.png' );
        saveCardBlackTexture.minFilter = THREE.LinearFilter;
    }
    var Cards = new THREE.SpriteMaterial( { map: saveCardBlackTexture, color: 0xffffff } );
    return Cards;
}

// Left Score Creation
function create_Left_Score(){
    //Score
    if(saveLeftScoreTexture == null){
        var loader = new THREE.TextureLoader();
        loader.crossOrigin = true;
        saveLeftScoreTexture = loader.load( 'Images/Additional Images/leftScores.png' );
        saveLeftScoreTexture.minFilter = THREE.LinearFilter;
    }
    var Score = new THREE.SpriteMaterial( { map: saveLeftScoreTexture, color: 0xffffff } );
    return Score;
}

// Right Score Creation
function create_Right_Score(){
    //Score
    if(saveRightScoreTexture == null){
        var loader = new THREE.TextureLoader();
        loader.crossOrigin = true;
        saveRightScoreTexture = loader.load( 'Images/Additional Images/rightScores.png' );
        saveRightScoreTexture.minFilter = THREE.LinearFilter;
    }
    var Score = new THREE.SpriteMaterial( { map: saveRightScoreTexture, color: 0xffffff } );
    return Score;
}

//
function create_Square(){
    var geometry =
        new THREE.PlaneBufferGeometry (7, 6,0);
    var material = new THREE.MeshBasicMaterial( { color: 0xfafafa } );
    var cards = new THREE.Mesh( geometry, material );
    //cards.material.transparent = true;
    //cards.material.opacity = 0.3;
    cards.name = "cards";
    return cards;
}

//Text Creation Function
//Since this is used more than 10 times throughout the code
//I created this function to cut down on the length and effort
function text_creation(textValue, heightPower, widthPower, lineHeight ){
    var texts = new THREEx.DynamicText2DObject();
    texts.parameters.text = textValue;
    //HeightPower
    //The HeightPower works in powers of two and starts with 2^7 = 128
    //The height for the canvas works like this = 2^(7+heightPower);
    texts.dynamicTexture.canvas.height = Math.pow(2, 7+heightPower);
    //WidthPower
    //The WidthPower works in powers of two and starts with 2^7 = 128
    //The width for the canvas works like this = 2^(7+widthPower);
    texts.dynamicTexture.canvas.width = Math.pow(2, 7+widthPower);
    /** Powers of 2
    2^(7) = 128
    2^(8) = 256
    2^(9) = 512
    2^(10) = 1024
    2^(11) = 2048
    2^(12) = 4096
    **/
    //Line Height
    //The higher the value, the larger the gap between lines
    texts.parameters.lineHeight= lineHeight;
    texts.parameters.align = "center";
    texts.update();
    return texts;
}

// Selection Creation for the Credits Selection
// selectionNumber is the order of the Selections, starting from 0 -> 9
function credits_Selection_Creation( selectionNumber){
    var selection = text_creation( "selection", 0, 3, 0.8);
    selection.parameters.font= "95px Arial";
    selection.parameters.fillStyle= "Black";
    selection.posX = -16.5;
    selection.posY = selectionNumber*-2.5 + 5;
    selection.posZ = -2.9;
    selection.position.set( selection.posX, selection.posY, selection.posZ);
    selection.scale.set(14,3,1);
    selection.arrayNumber = 0;
    selection.name = "Selection";
    selection.parameters.align = "left";
    selection.update();
    return selection;
}
}

window.onload = init;
if(categories[1].credits == true){
    if(animeImages[event.object.arrayNumber].sprite == null){
        animeImages[event.object.arrayNumber].sprite = loadImagesfromText(animeImages[event.object.arrayNumber].filename,"Anime",animeImages[event.object.arrayNumber].backgroundColor);
        animeImages[event.object.arrayNumber].sprite.characterName = animeImages[event.object.arrayNumber].name;
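// The four category loaders above duplicate the same parsing of a six-line record
// format (series, name, filename, background color, source) that starts at line 8
// of each text file. A minimal sketch of a shared helper that could replace the
// duplicated loops -- parseCharacterFile is hypothetical, not part of the original code:
function parseCharacterFile(data){
    var lines = data.split("\n");
    var records = [];
    for(var x = 8; x < lines.length-4; x+=6){
        records.push({
            series: lines[x],
            name: lines[x+1],
            filename: lines[x+2],
            backgroundColor: lines[x+3],
            sprite: null,
            source: lines[x+4]
        });
    }
    return records;
}
// Each loader could then shrink to a one-line callback, e.g.:
// jQuery.get("Images/Animals/Animals.txt", undefined, function(data){ animalImages = parseCharacterFile(data); }, "html");
// Splitting the file once into "lines" also avoids re-splitting the whole payload on every field access.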
protodesc_test.go
package protodesc

import (
	"context"
	"testing"

	"github.com/pallscall/ghz/internal"

	"github.com/jhump/protoreflect/grpcreflect"
	"github.com/stretchr/testify/assert"
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"

	reflectpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
)

func TestProtodesc_GetMethodDescFromProto(t *testing.T) {
	t.Run("invalid path", func(t *testing.T) {
		md, err := GetMethodDescFromProto("pkg.Call", "invalid.proto", []string{})
		assert.Error(t, err)
		assert.Nil(t, md)
	})

	t.Run("invalid call symbol", func(t *testing.T) {
		md, err := GetMethodDescFromProto("pkg.Call", "../testdata/greeter.proto", []string{})
		assert.Error(t, err)
		assert.Nil(t, md)
	})

	t.Run("invalid package", func(t *testing.T) {
		md, err := GetMethodDescFromProto("helloworld.pkg.SayHello", "../testdata/greeter.proto", []string{})
		assert.Error(t, err)
		assert.Nil(t, md)
	})

	t.Run("invalid method", func(t *testing.T) {
		md, err := GetMethodDescFromProto("helloworld.Greeter.Foo", "../testdata/greeter.proto", []string{})
		assert.Error(t, err)
		assert.Nil(t, md)
	})

	t.Run("valid symbol", func(t *testing.T) {
		md, err := GetMethodDescFromProto("helloworld.Greeter.SayHello", "../testdata/greeter.proto", []string{})
		assert.NoError(t, err)
		assert.NotNil(t, md)
	})

	t.Run("valid symbol slashes", func(t *testing.T) {
		md, err := GetMethodDescFromProto("helloworld.Greeter/SayHello", "../testdata/greeter.proto", []string{})
		assert.NoError(t, err)
		assert.NotNil(t, md)
	})

	t.Run("proto3 optional support", func(t *testing.T) {
		md, err := GetMethodDescFromProto("helloworld.OptionalGreeter/SayHello", "../testdata/optional.proto", []string{})
		assert.NoError(t, err)
		assert.NotNil(t, md)
	})
}

func TestProtodesc_GetMethodDescFromProtoSet(t *testing.T) {
	t.Run("invalid path", func(t *testing.T) {
		md, err := GetMethodDescFromProtoSet("pkg.Call", "invalid.protoset")
		assert.Error(t, err)
		assert.Nil(t, md)
	})

	t.Run("invalid call symbol", func(t *testing.T) {
		md, err := GetMethodDescFromProtoSet("pkg.Call", "../testdata/bundle.protoset")
		assert.Error(t, err)
		assert.Nil(t, md)
	})

	t.Run("invalid package", func(t *testing.T) {
		md, err := GetMethodDescFromProtoSet("helloworld.pkg.SayHello", "../testdata/bundle.protoset")
		assert.Error(t, err)
		assert.Nil(t, md)
	})

	t.Run("invalid method", func(t *testing.T) {
		md, err := GetMethodDescFromProtoSet("helloworld.Greeter.Foo", "../testdata/bundle.protoset")
		assert.Error(t, err)
		assert.Nil(t, md)
	})

	t.Run("valid symbol", func(t *testing.T) {
		md, err := GetMethodDescFromProtoSet("helloworld.Greeter.SayHello", "../testdata/bundle.protoset")
		assert.NoError(t, err)
		assert.NotNil(t, md)
	})

	t.Run("valid symbol proto 2", func(t *testing.T) {
		md, err := GetMethodDescFromProtoSet("cap.Capper.Cap", "../testdata/bundle.protoset")
		assert.NoError(t, err)
		assert.NotNil(t, md)
	})

	t.Run("valid symbol slashes", func(t *testing.T) {
		md, err := GetMethodDescFromProtoSet("helloworld.Greeter/SayHello", "../testdata/bundle.protoset")
		assert.NoError(t, err)
		assert.NotNil(t, md)
	})
}

func TestParseServiceMethod(t *testing.T) {
	testParseServiceMethodSuccess(t, "package.Service.Method", "package.Service", "Method")
	testParseServiceMethodSuccess(t, ".package.Service.Method", "package.Service", "Method")
	testParseServiceMethodSuccess(t, "package.Service/Method", "package.Service", "Method")
	testParseServiceMethodSuccess(t, ".package.Service/Method", "package.Service", "Method")
	testParseServiceMethodSuccess(t, "Service.Method", "Service", "Method")
	testParseServiceMethodSuccess(t, ".Service.Method", "Service", "Method")
	testParseServiceMethodSuccess(t, "Service/Method", "Service", "Method")
	testParseServiceMethodSuccess(t, ".Service/Method", "Service", "Method")
	testParseServiceMethodError(t, "")
	testParseServiceMethodError(t, ".")
	testParseServiceMethodError(t, "package/Service/Method")
}

func testParseServiceMethodSuccess(t *testing.T, svcAndMethod string, expectedService string, expectedMethod string) {
	service, method, err := parseServiceMethod(svcAndMethod)
	assert.NoError(t, err)
	assert.Equal(t, expectedService, service)
	assert.Equal(t, expectedMethod, method)
}

func
(t *testing.T, svcAndMethod string) {
	_, _, err := parseServiceMethod(svcAndMethod)
	assert.Error(t, err)
}

func TestProtodesc_GetMethodDescFromReflect(t *testing.T) {
	_, s, err := internal.StartServer(false)
	if err != nil {
		assert.FailNow(t, err.Error())
	}
	defer s.Stop()

	t.Run("test known call", func(t *testing.T) {
		var opts []grpc.DialOption
		opts = append(opts, grpc.WithInsecure())

		ctx := context.Background()
		conn, err := grpc.DialContext(ctx, internal.TestLocalhost, opts...)
		assert.NoError(t, err)

		md := make(metadata.MD)
		refCtx := metadata.NewOutgoingContext(ctx, md)
		refClient := grpcreflect.NewClient(refCtx, reflectpb.NewServerReflectionClient(conn))

		mtd, err := GetMethodDescFromReflect("helloworld.Greeter.SayHello", refClient)
		assert.NoError(t, err)
		assert.NotNil(t, mtd)
		assert.Equal(t, "SayHello", mtd.GetName())
	})

	t.Run("test known call with /", func(t *testing.T) {
		var opts []grpc.DialOption
		opts = append(opts, grpc.WithInsecure())

		ctx := context.Background()
		conn, err := grpc.DialContext(ctx, internal.TestLocalhost, opts...)
		assert.NoError(t, err)

		md := make(metadata.MD)
		refCtx := metadata.NewOutgoingContext(ctx, md)
		refClient := grpcreflect.NewClient(refCtx, reflectpb.NewServerReflectionClient(conn))

		mtd, err := GetMethodDescFromReflect("helloworld.Greeter/SayHello", refClient)
		assert.NoError(t, err)
		assert.NotNil(t, mtd)
		assert.Equal(t, "SayHello", mtd.GetName())
	})

	t.Run("test unknown known call", func(t *testing.T) {
		var opts []grpc.DialOption
		opts = append(opts, grpc.WithInsecure())

		ctx := context.Background()
		conn, err := grpc.DialContext(ctx, internal.TestLocalhost, opts...)
		assert.NoError(t, err)

		md := make(metadata.MD)
		refCtx := metadata.NewOutgoingContext(ctx, md)
		refClient := grpcreflect.NewClient(refCtx, reflectpb.NewServerReflectionClient(conn))

		mtd, err := GetMethodDescFromReflect("helloworld.Greeter/SayHelloAsdf", refClient)
		assert.Error(t, err)
		assert.Nil(t, mtd)
	})
}
testParseServiceMethodError
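// For orientation, a sketch of the parsing behavior the cases above pin down -- an
// illustrative reimplementation, not the package's actual parseServiceMethod:
//
//	func parseServiceMethodSketch(svcAndMethod string) (string, string, error) {
//		// A leading '.' is tolerated: ".pkg.Service/Method" == "pkg.Service/Method".
//		s := strings.TrimPrefix(svcAndMethod, ".")
//		// More than one '/' is rejected, per the "package/Service/Method" case.
//		if strings.Count(s, "/") > 1 {
//			return "", "", errors.New("invalid service/method format")
//		}
//		// The method is whatever follows the last '/' or, failing that, the last '.'.
//		pos := strings.LastIndex(s, "/")
//		if pos < 0 {
//			pos = strings.LastIndex(s, ".")
//		}
//		if pos <= 0 || pos == len(s)-1 {
//			return "", "", errors.New("invalid service/method format")
//		}
//		return s[:pos], s[pos+1:], nil
//	}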
test_enums.py
# flake8: noqa
from ocpp_d.v16.enums import *


def test_authorization_status():
    assert AuthorizationStatus.accepted == "Accepted"
    assert AuthorizationStatus.blocked == "Blocked"
    assert AuthorizationStatus.expired == "Expired"
    assert AuthorizationStatus.invalid == "Invalid"
    assert AuthorizationStatus.concurrent_tx == "ConcurrentTx"


def test_availability_status():
    assert AvailabilityStatus.accepted == "Accepted"
    assert AvailabilityStatus.rejected == "Rejected"
    assert AvailabilityStatus.scheduled == "Scheduled"


def test_availability_type():
    assert AvailabilityType.inoperative == "Inoperative"
    assert AvailabilityType.operative == "Operative"


def test_cancel_reservation_status():
    assert CancelReservationStatus.accepted == "Accepted"
    assert CancelReservationStatus.rejected == "Rejected"


def test_charge_point_error_code():
    assert ChargePointErrorCode.connector_lock_failure == "ConnectorLockFailure"
    assert ChargePointErrorCode.ev_communication_error == "EVCommunicationError"
    assert ChargePointErrorCode.ground_failure == "GroundFailure"
    assert ChargePointErrorCode.high_temperature == "HighTemperature"
    assert ChargePointErrorCode.internal_error == "InternalError"
    assert ChargePointErrorCode.local_list_conflict == "LocalListConflict"
    assert ChargePointErrorCode.no_error == "NoError"
    assert ChargePointErrorCode.other_error == "OtherError"
    assert ChargePointErrorCode.over_current_failure == "OverCurrentFailure"
    assert ChargePointErrorCode.over_voltage == "OverVoltage"
    assert ChargePointErrorCode.power_meter_failure == "PowerMeterFailure"
    assert ChargePointErrorCode.power_switch_failure == "PowerSwitchFailure"
    assert ChargePointErrorCode.reader_failure == "ReaderFailure"
    assert ChargePointErrorCode.reset_failure == "ResetFailure"
    assert ChargePointErrorCode.under_voltage == "UnderVoltage"
    assert ChargePointErrorCode.weak_signal == "WeakSignal"


def test_charge_point_status():
    assert ChargePointStatus.available == 'Available'
    assert ChargePointStatus.preparing == 'Preparing'
    assert ChargePointStatus.charging == 'Charging'
    assert ChargePointStatus.suspended_evse == 'SuspendedEVSE'
    assert ChargePointStatus.suspended_ev == 'SuspendedEV'
    assert ChargePointStatus.finishing == 'Finishing'
    assert ChargePointStatus.reserved == 'Reserved'
    assert ChargePointStatus.unavailable == 'Unavailable'
    assert ChargePointStatus.faulted == 'Faulted'


def test_charging_profile_kind_type():
    assert ChargingProfileKindType.absolute == 'Absolute'
    assert ChargingProfileKindType.recurring == 'Recurring'
    assert ChargingProfileKindType.relative == 'Relative'


def test_charging_profile_purpose_type():
    assert ChargingProfilePurposeType.charge_point_max_profile == 'ChargePointMaxProfile'
    assert ChargingProfilePurposeType.tx_default_profile == 'TxDefaultProfile'
    assert ChargingProfilePurposeType.tx_profile == 'TxProfile'


def test_charging_profile_status():
    assert ChargingProfileStatus.accepted == "Accepted"
    assert ChargingProfileStatus.rejected == "Rejected"
    assert ChargingProfileStatus.not_supported == "NotSupported"


def test_charging_rate_unit():
    assert ChargingRateUnitType.watts == "W"
    assert ChargingRateUnitType.amps == "A"


def test_clear_cache_status():
    assert ClearCacheStatus.accepted == "Accepted"
    assert ClearCacheStatus.rejected == "Rejected"


def test_clear_charging_profile_status():
    assert ClearChargingProfileStatus.accepted == "Accepted"
    assert ClearChargingProfileStatus.unknown == "Unknown"


def test_configuration_status():
    assert ConfigurationStatus.accepted == "Accepted"
    assert ConfigurationStatus.rejected == "Rejected"
    assert ConfigurationStatus.reboot_required == "RebootRequired"
    assert ConfigurationStatus.not_supported == "NotSupported"


def test_data_transfer_status():
    assert DataTransferStatus.accepted == "Accepted"
    assert DataTransferStatus.rejected == "Rejected"
    assert DataTransferStatus.unknown_message_id == "UnknownMessageId"
    assert DataTransferStatus.unknown_vendor_id == "UnknownVendorId"


def test_diagnostics_status():
    assert DiagnosticsStatus.idle == "Idle"
    assert DiagnosticsStatus.uploaded == "Uploaded"
    assert DiagnosticsStatus.upload_failed == "UploadFailed"
    assert DiagnosticsStatus.uploading == "Uploading"


def test_firmware_status():
    assert FirmwareStatus.downloaded == "Downloaded"
    assert FirmwareStatus.download_failed == "DownloadFailed"
    assert FirmwareStatus.downloading == "Downloading"
    assert FirmwareStatus.installed == "Installed"


def test_get_composite_schedule_status():
    assert GetCompositeScheduleStatus.accepted == "Accepted"
    assert GetCompositeScheduleStatus.rejected == "Rejected"


def test_location():
    assert Location.inlet == "Inlet"
    assert Location.outlet == "Outlet"
    assert Location.body == "Body"
    assert Location.cable == "Cable"
    assert Location.ev == "EV"


def test_measurand():
    assert Measurand.energy_active_export_register == "Energy.Active.Export.Register"
    assert Measurand.energy_active_import_register == "Energy.Active.Import.Register"
    assert Measurand.energy_reactive_export_register == "Energy.Reactive.Export.Register"
    assert Measurand.energy_reactive_import_register == "Energy.Reactive.Import.Register"
    assert Measurand.energy_active_export_interval == "Energy.Active.Export.Interval"
    assert Measurand.energy_active_import_interval == "Energy.Active.Import.Interval"
    assert Measurand.energy_reactive_export_interval == "Energy.Reactive.Export.Interval"
    assert Measurand.energy_reactive_import_interval == "Energy.Reactive.Import.Interval"
    assert Measurand.frequency == "Frequency"
    assert Measurand.power_active_export == "Power.Active.Export"
    assert Measurand.power_active_import == "Power.Active.Import"
    assert Measurand.power_factor == "Power.Factor"
    assert Measurand.power_offered == "Power.Offered"
    assert Measurand.power_reactive_export == "Power.Reactive.Export"
    assert Measurand.power_reactive_import == "Power.Reactive.Import"
    assert Measurand.current_export == "Current.Export"
    assert Measurand.current_import == "Current.Import"
    assert Measurand.current_offered == "Current.Offered"
    assert Measurand.rpm == "RPM"
    assert Measurand.soc == "SoC"
    assert Measurand.voltage == "Voltage"
    assert Measurand.temperature == "Temperature"


def test_message_trigger():
    assert MessageTrigger.boot_notification == "BootNotification"
    assert MessageTrigger.diagnostics_status_notification == "DiagnosticsStatusNotification"
    assert MessageTrigger.firmware_status_notification == "FirmwareStatusNotification"
    assert MessageTrigger.heartbeat == "Heartbeat"
    assert MessageTrigger.meter_values == "MeterValues"
    assert MessageTrigger.status_notification == "StatusNotification"


def test_phase():
    assert Phase.l1 == "L1"
    assert Phase.l2 == "L2"
    assert Phase.l3 == "L3"
    assert Phase.n == "N"
    assert Phase.l1_n == "L1-N"
    assert Phase.l2_n == "L2-N"
    assert Phase.l3_n == "L3-N"
    assert Phase.l1_l2 == "L1-L2"
    assert Phase.l2_l3 == "L2-L3"
    assert Phase.l3_l1 == "L3-L1"


def test_reading_context():
    assert ReadingContext.interruption_begin == "Interruption.Begin"
    assert ReadingContext.interruption_end == "Interruption.End"
    assert ReadingContext.other == "Other"
    assert ReadingContext.sample_clock == "Sample.Clock"
    assert ReadingContext.sample_periodic == "Sample.Periodic"
    assert ReadingContext.transaction_begin == "Transaction.Begin"
    assert ReadingContext.transaction_end == "Transaction.End"
    assert ReadingContext.trigger == "Trigger"


def test_reason():
    assert Reason.emergency_stop == "EmergencyStop"
    assert Reason.ev_disconnected == "EVDisconnected"
    assert Reason.hard_reset == "HardReset"
    assert Reason.local == "Local"
    assert Reason.other == "Other"
    assert Reason.power_loss == "PowerLoss"
    assert Reason.reboot == "Reboot"
    assert Reason.remote == "Remote"
    assert Reason.soft_reset == "SoftReset"
    assert Reason.unlock_command == "UnlockCommand"
    assert Reason.de_authorized == "DeAuthorized"


def test_recurrency_kind():
    assert RecurrencyKind.daily == 'Daily'
    assert RecurrencyKind.weekly == 'Weekly'
def test_registration_status():
    assert RegistrationStatus.accepted == "Accepted"
    assert RegistrationStatus.pending == "Pending"
    assert RegistrationStatus.rejected == "Rejected"


def test_remote_start_stop_status():
    assert RemoteStartStopStatus.accepted == "Accepted"
    assert RemoteStartStopStatus.rejected == "Rejected"


def test_reservation_status():
    assert ReservationStatus.accepted == "Accepted"
    assert ReservationStatus.faulted == "Faulted"
    assert ReservationStatus.occupied == "Occupied"
    assert ReservationStatus.rejected == "Rejected"
    assert ReservationStatus.unavailable == "Unavailable"


def test_reset_status():
    assert ResetStatus.accepted == "Accepted"
    assert ResetStatus.rejected == "Rejected"


def test_reset_type():
    assert ResetType.hard == "Hard"
    assert ResetType.soft == "Soft"


def test_trigger_message_status():
    assert TriggerMessageStatus.accepted == "Accepted"
    assert TriggerMessageStatus.rejected == "Rejected"
    assert TriggerMessageStatus.not_implemented == "NotImplemented"


def test_unit_of_measure():
    assert UnitOfMeasure.wh == "Wh"
    assert UnitOfMeasure.kwh == "kWh"
    assert UnitOfMeasure.varh == "varh"
    assert UnitOfMeasure.kvarh == "kvarh"
    assert UnitOfMeasure.w == "W"
    assert UnitOfMeasure.kw == "kW"
    assert UnitOfMeasure.va == "VA"
    assert UnitOfMeasure.kva == "kVA"
    assert UnitOfMeasure.var == "var"
    assert UnitOfMeasure.kvar == "kvar"
    assert UnitOfMeasure.a == "A"
    assert UnitOfMeasure.v == "V"
    assert UnitOfMeasure.celsius == "Celsius"
    assert UnitOfMeasure.fahrenheit == "Fahrenheit"
    assert UnitOfMeasure.k == "K"
    assert UnitOfMeasure.percent == "Percent"
    assert UnitOfMeasure.hertz == "Hertz"


def test_unlock_status():
    assert UnlockStatus.unlocked == "Unlocked"
    assert UnlockStatus.unlock_failed == "UnlockFailed"
    assert UnlockStatus.not_supported == "NotSupported"


def test_update_status():
    assert UpdateStatus.accepted == "Accepted"
    assert UpdateStatus.failed == "Failed"
    assert UpdateStatus.not_supported == "NotSupported"
    assert UpdateStatus.version_mismatch == "VersionMismatch"


def test_update_type():
    assert UpdateType.differential == "Differential"
    assert UpdateType.full == "Full"


def test_value_format():
    assert ValueFormat.raw == "Raw"
    assert ValueFormat.signed_data == "SignedData"
    assert FirmwareStatus.idle == "Idle"
    assert FirmwareStatus.installation_failed == "InstallationFailed"
    assert FirmwareStatus.installing == "Installing"
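# Why the equality checks above hold: the v16 enums presumably subclass str, so each
# member compares equal to its raw wire value. A minimal illustrative sketch of the
# pattern (the real definitions live in ocpp_d.v16.enums):
from enum import Enum


class AuthorizationStatusSketch(str, Enum):
    accepted = "Accepted"
    blocked = "Blocked"
    expired = "Expired"
    invalid = "Invalid"
    concurrent_tx = "ConcurrentTx"


assert AuthorizationStatusSketch.accepted == "Accepted"  # str subclass: equal to the raw value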
views.py
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import viewsets

from periodic_tasks_api.models import CustomExtendedPeriodicTask
from periodic_tasks_api.serializers import PeriodicTaskSerializer
from periodic_tasks_api.filters import PeriodicTaskFilterSet


class PeriodicTaskView(viewsets.ModelViewSet):
    queryset = CustomExtendedPeriodicTask.objects.all()
    serializer_class = PeriodicTaskSerializer
    # Assuming PeriodicTaskFilterSet is a django-filter FilterSet: a FilterSet is not
    # a filter backend, so it is wired in via filterset_class behind DjangoFilterBackend.
    filter_backends = [DjangoFilterBackend]
    filterset_class = PeriodicTaskFilterSet
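# A minimal sketch of the FilterSet this view is assumed to wire in. The real
# PeriodicTaskFilterSet lives in periodic_tasks_api.filters; the fields below
# ("name", "enabled") are illustrative guesses based on django-celery-beat's
# PeriodicTask model:
#
#   import django_filters
#
#   class PeriodicTaskFilterSet(django_filters.FilterSet):
#       name = django_filters.CharFilter(lookup_expr="icontains")
#
#       class Meta:
#           model = CustomExtendedPeriodicTask
#           fields = ["name", "enabled"]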
test_solver_classes.py
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name

"""
Tests for solver classes module.
"""

import numpy as np

from qiskit import QiskitError
from qiskit.quantum_info import Operator, Statevector, SuperOp, DensityMatrix

from qiskit_dynamics import Solver
from qiskit_dynamics.signals import Signal

from ..common import QiskitDynamicsTestCase, TestJaxBase


class TestSolverExceptions(QiskitDynamicsTestCase):
    """Tests for Solver exception raising based on input types."""

    def setUp(self):
        X = Operator.from_label("X")
        self.ham_solver = Solver(hamiltonian_operators=[X], hamiltonian_signals=[1.0])

        self.lindblad_solver = Solver(
            hamiltonian_operators=[X], hamiltonian_signals=[1.0], dissipator_operators=[X]
        )

        self.vec_lindblad_solver = Solver(
            hamiltonian_operators=[X],
            hamiltonian_signals=[1.0],
            dissipator_operators=[X],
            evaluation_mode="dense_vectorized",
        )

    def test_hamiltonian_shape_error(self):
        """Test error raising if invalid shape for Hamiltonian model."""

        with self.assertRaises(QiskitError) as qe:
            self.ham_solver.solve([0.0, 1.0], np.array([1.0, 0.0, 0.0]))
        self.assertTrue("Shape mismatch" in str(qe.exception))

        with self.assertRaises(QiskitError) as qe:
            self.ham_solver.solve([0.0, 1.0], np.array([[[1.0, 0.0, 0.0]]]))
        self.assertTrue("Shape mismatch" in str(qe.exception))

        with self.assertRaises(QiskitError) as qe:
            self.ham_solver.solve([0.0, 1.0], Statevector(np.array([1.0, 0.0, 0.0])))
        self.assertTrue("Shape mismatch" in str(qe.exception))

    def test_lindblad_shape_error(self):
        """Test error raising if invalid shape for Lindblad model."""

        with self.assertRaises(QiskitError) as qe:
            self.lindblad_solver.solve([0.0, 1.0], np.array([1.0, 0.0, 0.0]))
        self.assertTrue("Shape mismatch" in str(qe.exception))

        with self.assertRaises(QiskitError) as qe:
            self.lindblad_solver.solve([0.0, 1.0], np.array([[[1.0, 0.0, 0.0]]]))
        self.assertTrue("Shape mismatch" in str(qe.exception))

        with self.assertRaises(QiskitError) as qe:
            self.lindblad_solver.solve([0.0, 1.0], Statevector(np.array([1.0, 0.0, 0.0])))
        self.assertTrue("Shape mismatch" in str(qe.exception))

    def test_vectorized_lindblad_shape_error(self):
        """Test error raising if invalid shape for vectorized Lindblad model."""

        with self.assertRaises(QiskitError) as qe:
            self.vec_lindblad_solver.solve([0.0, 1.0], np.array([[1.0, 0.0], [0.0, 1.0]]))
        self.assertTrue("Shape mismatch" in str(qe.exception))

        with self.assertRaises(QiskitError) as qe:
            self.vec_lindblad_solver.solve([0.0, 1.0], DensityMatrix(np.array([1.0, 0.0, 0.0])))
        self.assertTrue("Shape mismatch" in str(qe.exception))

        with self.assertRaises(QiskitError) as qe:
            self.vec_lindblad_solver.solve([0.0, 1.0], Statevector(np.array([1.0, 0.0, 0.0])))
        self.assertTrue("Shape mismatch" in str(qe.exception))

    def test_non_vectorized_SuperOp_error(self):
        """Test SuperOp simulation attempt for non-vectorized Lindblad model."""

        with self.assertRaises(QiskitError) as qe:
            self.lindblad_solver.solve([0.0, 1.0], SuperOp(np.eye(4)))
        self.assertTrue("Simulating SuperOp" in str(qe.exception))


class TestSolver(QiskitDynamicsTestCase):
    """Tests for Solver class."""

    def setUp(self):
        """Set up some simple models."""
        X = 2 * np.pi * Operator.from_label("X") / 2
        Z = 2 * np.pi * Operator.from_label("Z") / 2

        self.ham_solver = Solver(
            hamiltonian_operators=[X],
            hamiltonian_signals=[Signal(1.0, 5.0)],
            drift=5 * Z,
            rotating_frame=5 * Z,
        )

        self.rwa_ham_solver = Solver(
            hamiltonian_operators=[X],
            hamiltonian_signals=[Signal(1.0, 5.0)],
            drift=5 * Z,
            rotating_frame=5 * Z,
            rwa_cutoff_freq=2 * 5.0,
        )

        self.lindblad_solver = Solver(
            hamiltonian_operators=[X],
            hamiltonian_signals=[Signal(1.0, 5.0)],
            dissipator_operators=[0.01 * X],
            drift=5 * Z,
            rotating_frame=5 * Z,
        )

        self.vec_lindblad_solver = Solver(
            hamiltonian_operators=[X],
            hamiltonian_signals=[Signal(1.0, 5.0)],
            dissipator_operators=[0.01 * X],
            drift=5 * Z,
            rotating_frame=5 * Z,
            evaluation_mode="dense_vectorized",
        )

        # lindblad solver with no dissipation for testing
        self.vec_lindblad_solver_no_diss = Solver(
            hamiltonian_operators=[X],
            hamiltonian_signals=[Signal(1.0, 5.0)],
            dissipator_operators=[0.0 * X],
            drift=5 * Z,
            rotating_frame=5 * Z,
            evaluation_mode="dense_vectorized",
        )
        self.method = "DOP853"

    def test_lindblad_solve_statevector(self):
        """Test correct conversion of Statevector to DensityMatrix."""

        results = self.lindblad_solver.solve(
            [0.0, 1.0], y0=Statevector([0.0, 1.0]), method=self.method
        )
        self.assertTrue(isinstance(results.y[-1], DensityMatrix))
        self.assertTrue(results.y[-1].data[0, 0] > 0.99 and results.y[-1].data[0, 0] < 0.999)

    def
(self):
        """Test correct conversion of Statevector to DensityMatrix and vectorized solving."""

        results = self.vec_lindblad_solver.solve(
            [0.0, 1.0], y0=Statevector([0.0, 1.0]), method=self.method
        )
        results2 = self.lindblad_solver.solve(
            [0.0, 1.0], y0=Statevector([0.0, 1.0]), method=self.method
        )
        self.assertTrue(isinstance(results.y[-1], DensityMatrix))
        self.assertAllClose(results.y[-1].data, results2.y[-1].data)

    def test_array_vectorized_lindblad(self):
        """Test Lindblad solver is array-vectorized."""
        results = self.lindblad_solver.solve(
            [0.0, 1.0],
            y0=np.array([[[0.0, 0.0], [0.0, 1.0]], [[1.0, 0.0], [0.0, 0.0]]]),
            method=self.method,
        )
        self.assertTrue(results.y[-1][0, 0, 0] > 0.99 and results.y[-1][0, 0, 0] < 0.999)
        self.assertTrue(results.y[-1][1, 1, 1] > 0.99 and results.y[-1][1, 1, 1] < 0.999)

    def test_rwa_hamiltonian(self):
        """Test perfect inversion for pi pulse with RWA."""
        results = self.rwa_ham_solver.solve(
            [0.0, 1.0], y0=np.array([0.0, 1.0]), atol=1e-10, rtol=1e-10, method=self.method
        )
        self.assertTrue(np.abs(results.y[-1][0]) > (1 - 1e-8))

    def test_hamiltonian_DensityMatrix(self):
        """Test correct conjugation of Hamiltonian-based density matrix simulation."""
        results = self.ham_solver.solve(
            [0.0, 1.0],
            y0=DensityMatrix(np.array([0.0, 1.0])),
            atol=1e-10,
            rtol=1e-10,
            method=self.method,
        )
        self.assertTrue(isinstance(results.y[-1], DensityMatrix))
        self.assertTrue(np.abs(results.y[-1].data[0, 0]) > 0.999)

    def test_hamiltonian_SuperOp(self):
        """Test Hamiltonian-based SuperOp simulation."""
        results = self.rwa_ham_solver.solve(
            [0.0, 1.0], y0=SuperOp(np.eye(4)), atol=1e-10, rtol=1e-10, method=self.method
        )
        self.assertTrue(isinstance(results.y[-1], SuperOp))
        X = np.array([[0.0, 1.0], [1.0, 0.0]])
        self.assertAllClose(results.y[-1].data, np.kron(X, X))

    def test_hamiltonian_lindblad_SuperOp_consistency(self):
        """Test Hamiltonian-based SuperOp simulation."""
        results = self.ham_solver.solve(
            [0.0, 0.432], y0=SuperOp(np.eye(4)), atol=1e-10, rtol=1e-10, method=self.method
        )
        results2 = self.vec_lindblad_solver_no_diss.solve(
            [0.0, 0.432], y0=SuperOp(np.eye(4)), atol=1e-10, rtol=1e-10
        )
        self.assertAllClose(results.y[-1].data, results2.y[-1].data)


class TestSolverJax(TestSolver, TestJaxBase):
    """JAX version of TestSolver."""

    def setUp(self):
        """Set method to 'jax_odeint' to speed up running of jax version of tests."""
        super().setUp()
        self.method = "jax_odeint"

    def test_jit_solve(self):
        """Test jitting setting signals and solving."""

        def func(a):
            ham_solver = self.ham_solver.copy()
            ham_solver.signals = [Signal(lambda t: a, 5.0)]
            yf = ham_solver.solve(
                np.array([0.0, 1.0]), y0=np.array([0.0, 1.0]), method=self.method
            ).y[-1]
            return yf

        jit_func = self.jit_wrap(func)
        self.assertAllClose(jit_func(2.0), func(2.0))

    def test_jit_grad_solve(self):
        """Test jitting setting signals and solving."""

        def func(a):
            lindblad_solver = self.lindblad_solver.copy()
            lindblad_solver.signals = [[Signal(lambda t: a, 5.0)], [1.0]]
            yf = lindblad_solver.solve(
                [0.0, 1.0], y0=np.array([[0.0, 1.0], [0.0, 1.0]]), method=self.method
            ).y[-1]
            return yf

        jit_grad_func = self.jit_grad_wrap(func)
        jit_grad_func(1.0)
test_vec_lindblad_statevector
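# A minimal usage sketch of the Solver API exercised by the tests above, assuming
# qiskit-dynamics is installed; the operator and signal values are illustrative:
#
#   import numpy as np
#   from qiskit.quantum_info import Operator
#   from qiskit_dynamics import Solver
#   from qiskit_dynamics.signals import Signal
#
#   X = 2 * np.pi * Operator.from_label("X") / 2
#   solver = Solver(hamiltonian_operators=[X], hamiltonian_signals=[Signal(1.0, 5.0)])
#   # Propagate the state |1> over t in [0, 1] with a SciPy integrator, as the tests do.
#   result = solver.solve([0.0, 1.0], y0=np.array([0.0, 1.0]), method="DOP853")
#   print(result.y[-1])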
test_doc_table.py
# -*- coding: utf-8 -*-
"""
test_doc_table
~~~~~~~~~~~~~~

Test the Table Document element.

"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import unittest

from chemdataextractor.doc.table import Table, Cell
from chemdataextractor.doc.text import Caption


logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)


class TestTable(unittest.TestCase):
    """Simple Table instantiation tests."""

    maxDiff = None

    def test_uvvis_table(self):
= Table(
            caption=Caption('Spectroscopic properties of Coumarins in acetonitrile at 298 K.'),
            headings=[
                [
                    Cell(''),  # Blank compound heading
                    Cell('λmax (nm)'),
                    Cell('ε (M–1 cm–1)'),
                    Cell('λem (nm)'),
                    Cell('ϕ')
                ]
            ],
            rows=[
                [Cell('Coumarin 343'), Cell('398'), Cell('40 800'), Cell('492'), Cell('0.52')],
                [Cell('C144'), Cell('429'), Cell('9500'), Cell('601'), Cell('N/A')],
                [Cell('Coumarin 34'), Cell('269'), Cell('-'), Cell('435'), Cell('<0.01')],
            ]
        )
        # for record in t.caption.records:
        #     print(record.to_primitive())
        #     print(record.is_contextual)
        gold = [
            {'names': ['Coumarin 343'], 'quantum_yields': [{'type': '\u03d5', 'solvent': 'acetonitrile', 'value': '0.52', 'temperature': '298', 'temperature_units': 'K'}], 'uvvis_spectra': [{'temperature': '298', 'temperature_units': 'K', 'solvent': 'acetonitrile', 'peaks': [{'units': 'nm', 'value': '398'}]}, {'temperature': '298', 'temperature_units': 'K', 'solvent': 'acetonitrile', 'peaks': [{'extinction': '40800', 'extinction_units': 'M \u2013 1 cm \u2013 1'}]}]},
            {'labels': ['C144'], 'uvvis_spectra': [{'temperature': '298', 'temperature_units': 'K', 'solvent': 'acetonitrile', 'peaks': [{'units': 'nm', 'value': '429'}]}, {'temperature': '298', 'temperature_units': 'K', 'solvent': 'acetonitrile', 'peaks': [{'extinction': '9500', 'extinction_units': 'M \u2013 1 cm \u2013 1'}]}]},
            {'names': ['Coumarin 34'], 'quantum_yields': [{'type': '\u03d5', 'solvent': 'acetonitrile', 'value': '<0.01', 'temperature': '298', 'temperature_units': 'K'}], 'uvvis_spectra': [{'temperature': '298', 'temperature_units': 'K', 'solvent': 'acetonitrile', 'peaks': [{'units': 'nm', 'value': '269'}]}]},
            {'names': ['Coumarins']},
            {'names': ['acetonitrile']}
        ]
        # for record in t.records:
        #     print(record.to_primitive())
        self.assertEqual(gold, [record.serialize() for record in t.records])


if __name__ == '__main__':
    unittest.main()
""""""
        t = Table(
            caption=Caption('Selected photophysical properties of biarylsubstituted pyrazoles 5–8 and 1-methyl-3,5-diphenylpyrazole (9) at room temperature'),
            headings=[
                [
                    Cell('Compound'),
                    Cell('Absorption maxima λmax,abs (ε) [nm] (L cm−1 mol−1)'),
                    Cell('Emission maxima λmax,em (Φf) [nm] (a.u.)'),
                    Cell('Stokes-shift Δṽ [cm−1]')
                ]
            ],
            rows=[
                [Cell(' 5a '), Cell('273.5 (40 100)'), Cell('357.0 (0.77)'), Cell('9400')],
                [Cell(' 5b '), Cell('268.5 (36 700)'), Cell('359.0 (0.77)'), Cell('8600')],
                [Cell('Coumarin 343'), Cell('263.0 (38 400)'), Cell('344.5 (0.67)'), Cell('9000')],
                [Cell(' 5d '), Cell('281.0 (34 200)'), Cell('351.5 (0.97)'), Cell('7100')],
                [Cell(' 5e '), Cell('285.0 (44 000)'), Cell('382.0 (0.35)'), Cell('8900')],
                [Cell(' 5f '), Cell('289.0 (43 300)'), Cell('363.0 (0.80)'), Cell('7100')],
                [Cell(' 5g '), Cell('285.0 (42 000)'), Cell('343.5 (0.86)'), Cell('6000')],
                [Cell(' 6a '), Cell('283.5 (35 600)'), Cell('344.5 (0.49)'), Cell('6300')],
                [Cell(' 6b '), Cell('267.5 (35 800)'), Cell('338.5 (0.83)'), Cell('7800')],
                [Cell(' 6c '), Cell('286.0 (33 000)'), Cell('347.0 (0.27)'), Cell('6200')],
                [Cell(' 6d '), Cell('306.5 (36 600)'), Cell('384.0 (0.10)'), Cell('6600')],
                [Cell(' 7 '), Cell('288.5 (62 500)'), Cell('367.0 (0.07)'), Cell('7400')],
                [Cell('Compound 8a '), Cell('257.0 (36 300), 293.0 sh (25 000)'), Cell('385.0 (0.41)'), Cell('8200')],
                [Cell(' 8b '), Cell('257.0 (32 000), 296.0 sh (23000)'), Cell('388.0 (0.33)'), Cell('8000')],
                [Cell(' 8c '), Cell('257.0 (27 400), 307.5 (18900)'), Cell('387.0 (0.12)'), Cell('6700')],
                [Cell(' 8d '), Cell('268.5 (29 500)'), Cell('385.0 (0.29)'), Cell('11 300')],
                [Cell('Dye 8e '), Cell('261.5 (39 900), 288.0 sh (29 600), 311.0 sh (20 500)'), Cell('386.5 (0.37)'), Cell('6300')],
                [Cell(' 8f '), Cell('256.5 (27 260), 296.0 (28404)'), Cell('388.5 (0.35)'), Cell('8000')],
                [Cell(' 8g '), Cell('272.5 (39 600)'), Cell('394.0 (0.30)'), Cell('11 300')],
                [Cell(' 8h '), Cell('286.0 (22 900)'), Cell('382.5 (0.33)'), Cell('8800')],
                [Cell(' 9 '), Cell('254.0 (28 800)'), Cell('338.5 (0.40)'), Cell('9800')]]
        )
        gold = [
            {'labels': [u'5a'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'40100', 'value': u'273.5'}]}], 'quantum_yields': [{'value': u'0.77'}]},
            {'labels': [u'5b'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'36700', 'value': u'268.5'}]}], 'quantum_yields': [{'value': u'0.77'}]},
            {'names': [u'Coumarin 343'], 'quantum_yields': [{'value': u'0.67'}], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'38400', 'value': u'263.0'}]}]},
            {'labels': [u'5d'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'34200', 'value': u'281.0'}]}], 'quantum_yields': [{'value': u'0.97'}]},
            {'labels': [u'5e'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'44000', 'value': u'285.0'}]}], 'quantum_yields': [{'value': u'0.35'}]},
            {'labels': [u'5f'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'43300', 'value': u'289.0'}]}], 'quantum_yields': [{'value': u'0.80'}]},
            {'labels': [u'5g'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'42000', 'value': u'285.0'}]}], 'quantum_yields': [{'value': u'0.86'}]},
            {'labels': [u'6a'],
             'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'35600', 'value': u'283.5'}]}], 'quantum_yields': [{'value': u'0.49'}]},
            {'labels': [u'6b'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'35800', 'value': u'267.5'}]}], 'quantum_yields': [{'value': u'0.83'}]},
            {'labels': [u'6c'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'33000', 'value': u'286.0'}]}], 'quantum_yields': [{'value': u'0.27'}]},
            {'labels': [u'6d'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'36600', 'value': u'306.5'}]}], 'quantum_yields': [{'value': u'0.10'}]},
            {'labels': [u'7'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'62500', 'value': u'288.5'}]}], 'quantum_yields': [{'value': u'0.07'}]},
            {'labels': [u'8a'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'36300', 'value': u'257.0'}, {'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'shape': u'sh', 'extinction': u'25000', 'value': u'293.0'}]}], 'quantum_yields': [{'value': u'0.41'}]},
            {'labels': [u'8b'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'32000', 'value': u'257.0'}, {'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'shape': u'sh', 'extinction': u'23000', 'value': u'296.0'}]}], 'quantum_yields': [{'value': u'0.33'}]},
            {'labels': [u'8c'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'27400', 'value': u'257.0'}, {'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'18900', 'value': u'307.5'}]}], 'quantum_yields': [{'value': u'0.12'}]},
            {'labels': [u'8d'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'29500', 'value': u'268.5'}]}], 'quantum_yields': [{'value': u'0.29'}]},
            {'labels': [u'8e'], 'quantum_yields': [{'value': u'0.37'}], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'39900', 'value': u'261.5'}, {'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'shape': u'sh', 'extinction': u'29600', 'value': u'288.0'}, {'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'shape': u'sh', 'extinction': u'20500', 'value': u'311.0'}]}]},
            {'labels': [u'8f'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'27260', 'value': u'256.5'}, {'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'28404', 'value': u'296.0'}]}], 'quantum_yields': [{'value': u'0.35'}]},
            {'labels': [u'8g'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'39600', 'value': u'272.5'}]}], 'quantum_yields': [{'value': u'0.30'}]},
            {'labels': [u'8h'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'22900', 'value': u'286.0'}]}], 'quantum_yields': [{'value': u'0.33'}]},
            {'labels': [u'9'], 'uvvis_spectra': [{'peaks': [{'units': u'nm', 'extinction_units': u'L cm \u2212 1 mol \u2212 1', 'extinction': u'28800', 'value': u'254.0'}]}],
             'quantum_yields': [{'value': u'0.40'}]},
        ]
        for record in t.records:
            print(record.serialize())
        self.assertEqual(gold, [record.serialize() for record in t.records])

    def test_spectroscopic_table(self):
        """"""
        t
merkle_set.py
from abc import ABCMeta, abstractmethod
from hashlib import sha256
from typing import Any, Dict, List, Tuple

from chives.types.blockchain_format.sized_bytes import bytes32

"""
A simple, confidence-inspiring Merkle Set standard

Advantages of this standard:
Low CPU requirements
Small proofs of inclusion/exclusion
Reasonably simple implementation

The main tricks in this standard are:

Skips repeated hashing of exactly two things even when they share prefix bits

Proofs support proving inclusion/exclusion for a large number of values in
a single string. They're a serialization of a subset of the tree.

Proof format:

multiproof: subtree
subtree: middle or terminal or truncated or empty
middle: MIDDLE 1 subtree subtree
terminal: TERMINAL 1 hash 32
# If the sibling is empty truncated implies more than two children.
truncated: TRUNCATED 1 hash 32
empty: EMPTY 1
EMPTY: \x00
TERMINAL: \x01
MIDDLE: \x02
TRUNCATED: \x03
"""

EMPTY = bytes([0])
TERMINAL = bytes([1])
MIDDLE = bytes([2])
TRUNCATED = bytes([3])

BLANK = bytes32([0] * 32)

prehashed: Dict[bytes, Any] = {}


def init_prehashed():
    for x in [EMPTY, TERMINAL, MIDDLE]:
        for y in [EMPTY, TERMINAL, MIDDLE]:
            prehashed[x + y] = sha256(bytes([0] * 30) + x + y)


init_prehashed()


def hashdown(mystr: bytes) -> bytes:
    assert len(mystr) == 66
    h = prehashed[bytes(mystr[0:1] + mystr[33:34])].copy()
    h.update(mystr[1:33] + mystr[34:])
    return h.digest()[:32]


def compress_root(mystr: bytes) -> bytes32:
    assert len(mystr) == 33
    if mystr[0:1] == MIDDLE:
        return bytes32(mystr[1:])
    if mystr[0:1] == EMPTY:
        assert mystr[1:] == BLANK
        return BLANK
    return bytes32(sha256(mystr).digest()[:32])


def get_bit(mybytes: bytes, pos: int) -> int:
    assert len(mybytes) == 32
    return (mybytes[pos // 8] >> (7 - (pos % 8))) & 1


class Node(metaclass=ABCMeta):
    hash: bytes

    @abstractmethod
    def get_hash(self) -> bytes:
        pass

    @abstractmethod
    def is_empty(self) -> bool:
        pass

    @abstractmethod
    def is_terminal(self) -> bool:
        pass

    @abstractmethod
    def is_double(self) -> bool:
        pass

    @abstractmethod
    def add(self, toadd: bytes, depth: int) -> "Node":
        pass

    @abstractmethod
    def remove(self, toremove: bytes, depth: int):
        pass

    @abstractmethod
    def is_included(self, tocheck: bytes, depth: int, p: List[bytes]) -> bool:
        pass

    @abstractmethod
    def other_included(self, tocheck: bytes, depth: int, p: List[bytes], collapse: bool):
        pass

    @abstractmethod
    def _audit(self, hashes: List[bytes], bits: List[int]):
        pass


class MerkleSet:
    root: Node

    def __init__(self, root: Node = None):
        if root is None:
            self.root = _empty
        else:
            self.root = root

    def get_root(self) -> bytes32:
        return compress_root(self.root.get_hash())

    def add_already_hashed(self, toadd: bytes):
        self.root = self.root.add(toadd, 0)

    def remove_already_hashed(self, toremove: bytes):
        self.root = self.root.remove(toremove, 0)

    def is_included_already_hashed(self, tocheck: bytes) -> Tuple[bool, bytes]:
        proof: List = []
        r = self.root.is_included(tocheck, 0, proof)
        return r, b"".join(proof)

    def _audit(self, hashes: List[bytes]):
        newhashes: List = []
        self.root._audit(newhashes, [])
        assert newhashes == sorted(newhashes)


class EmptyNode(Node):
    def __init__(self):
        self.hash = BLANK

    def get_hash(self) -> bytes:
        return EMPTY + BLANK

    def is_empty(self) -> bool:
        return True

    def is_terminal(self) -> bool:
        return False

    def is_double(self) -> bool:
        raise SetError()

    def add(self, toadd: bytes, depth: int) -> Node:
        return TerminalNode(toadd)

    def remove(self, toremove: bytes, depth: int) -> Node:
        return self

    def is_included(self, tocheck: bytes, depth: int, p: List[bytes]) -> bool:
        p.append(EMPTY)
        return False

    def other_included(self, tocheck: bytes, depth: int, p: List[bytes], collapse: bool):
        p.append(EMPTY)

    def _audit(self, hashes: List[bytes], bits: List[int]):
        pass


_empty = EmptyNode()


class TerminalNode(Node):
    def __init__(self, hash: bytes, bits: List[int] = None):
        assert len(hash) == 32
        self.hash = hash
        if bits is not None:
            self._audit([], bits)

    def get_hash(self) -> bytes:
        return TERMINAL + self.hash

    def is_empty(self) -> bool:
        return False

    def is_terminal(self) -> bool:
        return True

    def is_double(self) -> bool:
        raise SetError()

    def add(self, toadd: bytes, depth: int) -> Node:
        if toadd == self.hash:
            return self
        if toadd > self.hash:
            return self._make_middle([self, TerminalNode(toadd)], depth)
        else:
            return self._make_middle([TerminalNode(toadd), self], depth)

    def _make_middle(self, children: Any, depth: int) -> Node:
        cbits = [get_bit(child.hash, depth) for child
in children] if cbits[0] != cbits[1]: return MiddleNode(children) nextvals: List[Node] = [_empty, _empty] nextvals[cbits[0] ^ 1] = _empty nextvals[cbits[0]] = self._make_middle(children, depth + 1) return MiddleNode(nextvals) def remove(self, toremove: bytes, depth: int) -> Node: if toremove == self.hash: return _empty return self def is_included(self, tocheck: bytes, depth: int, p: List[bytes]) -> bool: p.append(TERMINAL + self.hash) return tocheck == self.hash def other_included(self, tocheck: bytes, depth: int, p: List[bytes], collapse: bool): p.append(TERMINAL + self.hash) def _audit(self, hashes: List[bytes], bits: List[int]): hashes.append(self.hash) for pos, v in enumerate(bits): assert get_bit(self.hash, pos) == v class MiddleNode(Node): def __init__(self, children: List[Node]): self.children = children if children[0].is_empty() and children[1].is_double(): self.hash = children[1].hash elif children[1].is_empty() and children[0].is_double(): self.hash = children[0].hash else: if children[0].is_empty() and (children[1].is_empty() or children[1].is_terminal()): raise SetError() if children[1].is_empty() and children[0].is_terminal(): raise SetError if children[0].is_terminal() and children[1].is_terminal() and children[0].hash >= children[1].hash: raise SetError self.hash = hashdown(children[0].get_hash() + children[1].get_hash()) def get_hash(self) -> bytes: return MIDDLE + self.hash def is_empty(self) -> bool: return False def is_terminal(self) -> bool: return False def is_double(self) -> bool: if self.children[0].is_empty(): return self.children[1].is_double() if self.children[1].is_empty(): return self.children[0].is_double() return self.children[0].is_terminal() and self.children[1].is_terminal() def add(self, toadd: bytes, depth: int) -> Node: bit = get_bit(toadd, depth) child = self.children[bit] newchild = child.add(toadd, depth + 1) if newchild is child: return self newvals = [x for x in self.children] newvals[bit] = newchild return MiddleNode(newvals) def remove(self, toremove: bytes, depth: int) -> Node: bit = get_bit(toremove, depth) child = self.children[bit] newchild = child.remove(toremove, depth + 1) if newchild is child: return self otherchild = self.children[bit ^ 1] if newchild.is_empty() and otherchild.is_terminal(): return otherchild if newchild.is_terminal() and otherchild.is_empty(): return newchild newvals = [x for x in self.children] newvals[bit] = newchild return MiddleNode(newvals) def is_included(self, tocheck: bytes, depth: int, p: List[bytes]) -> bool: p.append(MIDDLE) if get_bit(tocheck, depth) == 0: r = self.children[0].is_included(tocheck, depth + 1, p) self.children[1].other_included(tocheck, depth + 1, p, not self.children[0].is_empty()) return r else: self.children[0].other_included(tocheck, depth + 1, p, not self.children[1].is_empty()) return self.children[1].is_included(tocheck, depth + 1, p) def other_included(self, tocheck: bytes, depth: int, p: List[bytes], collapse: bool): if collapse or not self.is_double(): p.append(TRUNCATED + self.hash) else: self.is_included(tocheck, depth, p) def _audit(self, hashes: List[bytes], bits: List[int]): self.children[0]._audit(hashes, bits + [0]) self.children[1]._audit(hashes, bits + [1]) class TruncatedNode(Node): def __init__(self, hash: bytes): self.hash = hash def get_hash(self) -> bytes: return MIDDLE + self.hash def is_empty(self) -> bool: return False def is_terminal(self) -> bool: return False def is_double(self) -> bool: return False def add(self, toadd: bytes, depth: int) -> Node: return self def 
remove(self, toremove: bytes, depth: int) -> Node: return self def is_included(self, tocheck: bytes, depth: int, p: List[bytes]) -> bool: raise SetError() def other_included(self, tocheck: bytes, depth: int, p: List[bytes], collapse: bool): p.append(TRUNCATED + self.hash) def _audit(self, hashes: List[bytes], bits: List[int]): pass class SetError(Exception): pass def confirm_included(root: bytes32, val: bytes, proof: bytes) -> bool: return confirm_included_already_hashed(root, sha256(val).digest(), proof) def confirm_included_already_hashed(root: bytes32, val: bytes, proof: bytes) -> bool: return _confirm(root, val, proof, True) def confirm_not_included(root: bytes32, val: bytes, proof: bytes) -> bool: return confirm_not_included_already_hashed(root, sha256(val).digest(), proof) def confirm_not_included_already_hashed(root: bytes32, val: bytes, proof: bytes) -> bool: return _confirm(root, val, proof, False) def _confirm(root: bytes32, val: bytes, proof: bytes, expected: bool) -> bool: try: p = deserialize_proof(proof) if p.get_root() != root: return False r, junk = p.is_included_already_hashed(val) return r == expected except SetError: return False def deserialize_proof(proof: bytes) -> MerkleSet: try: r, pos = _deserialize(proof, 0, []) if pos != len(proof): raise SetError() return MerkleSet(r) except IndexError: raise SetError() def _deserialize(proof: bytes, pos: int, bits: List[int]) -> Tuple[Node, int]: t = proof[pos : pos + 1] # flake8: noqa if t == EMPTY: return _empty, pos + 1 if t == TERMINAL: return TerminalNode(proof[pos + 1 : pos + 33], bits), pos + 33 # flake8: noqa if t == TRUNCATED: return TruncatedNode(proof[pos + 1 : pos + 33]), pos + 33 # flake8: noqa if t != MIDDLE: raise SetError() v0, pos = _deserialize(proof, pos + 1, bits + [0]) v1, pos = _deserialize(proof, pos, bits + [1]) return MiddleNode([v0, v1]), pos
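A minimal usage sketch of the set/proof API above, assuming this module's names are in scope (the sketch itself is not part of the original file): build a set from two already-hashed values, prove membership and non-membership, and verify both proofs against the compressed root.

# Values handed to the *_already_hashed helpers must be 32-byte digests.
s = MerkleSet()
apple = sha256(b"apple").digest()
banana = sha256(b"banana").digest()
s.add_already_hashed(apple)
s.add_already_hashed(banana)
root = s.get_root()

found, proof = s.is_included_already_hashed(apple)
assert found and confirm_included_already_hashed(root, apple, proof)

cherry = sha256(b"cherry").digest()
found, proof = s.is_included_already_hashed(cherry)
assert not found and confirm_not_included_already_hashed(root, cherry, proof)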
main.rs
use input::{ event::pointer::{Axis, AxisSource, PointerEvent, PointerEventTrait}, event::Event, }; use std::io::Error; use std::process::{Command, Stdio}; mod config; mod context; mod tracking; fn main() -> Result<(), Box<dyn std::error::Error>> { let settings = config::Config::default().construct(); let left_action: Vec<&str> = settings.left_swipe_action.split_whitespace().collect(); let left_cmd = left_action[0]; let left_args = left_action.get(1..).unwrap(); let right_action: Vec<&str> = settings.right_swipe_action.split_whitespace().collect(); let right_cmd = right_action[0]; let right_args = right_action.get(1..).unwrap(); let mut rt = tokio::runtime::Runtime::new()?; rt.block_on(async { let mut context = context::LibinputContext::open(settings.device.as_str()).map_err(|_| { std::io::Error::new( std::io::ErrorKind::Other, "failed to create libinput context", ) })?; context.resume().map_err(|_| { std::io::Error::new( std::io::ErrorKind::Other, "failed to resume libinput context", ) })?; let mut left_swipe = tracking::SwipeTracking::new(); let mut right_swipe = tracking::SwipeTracking::new(); while let Ok(e) = context.next().await { match e { // We only care about horizontal scroll pointer events, which are generated when // libinput detects two-finger swipes. Event::Pointer(PointerEvent::Axis(pae)) => { if pae.has_axis(Axis::Horizontal) && pae.axis_source() == AxisSource::Finger { // Track which direction the swipe is going. let av = pae.axis_value(Axis::Horizontal); if av < 0.0 { left_swipe.measure_event(pae.time_usec(), av); } else if av > 0.0 { right_swipe.measure_event(pae.time_usec(), av); } else { // No magnitude for the swipe action, which is a special signal that the // swipe has stopped. Use our running total to decide whether we should // actually treat this as a swipe, based on our velocity threshold. let tend = pae.time_usec(); let lvdelta = left_swipe.flush(tend); let rvdelta = right_swipe.flush(tend); // We reverse the direction to emulate natural scrolling: if you drag your // fingers right to left (left swipe), you're swiping the page to the left, // or pulling the next page to you, and vice versa. Just like a book. let result = if lvdelta.is_some() && rvdelta.is_none() { Some((lvdelta.unwrap(), left_cmd, left_args))
Some((rvdelta.unwrap(), right_cmd, right_args)) } else { None }; // This cancels out weird events where the user scrolled/swiped both left // and right or their touchpad picked up something weird. if let Some((vdelta, cmd, cmd_args)) = result { if vdelta.abs() >= settings.threshold { let _ = launch_xdotool(cmd, cmd_args); } } } } } // We only handle pointer events. _ => {} } } Ok(()) }) } fn launch_xdotool(cmd: &str, cmd_opts: &[&str]) -> Result<(), Error> { Command::new(cmd) .args(cmd_opts) .stdin(Stdio::null()) .stdout(Stdio::null()) .stderr(Stdio::null()) .spawn()? .wait_with_output()?; Ok(()) }
} else if rvdelta.is_some() && lvdelta.is_none() {
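tracking::SwipeTracking is defined in a module that is not part of this entry. As a hedged illustration of the accumulate-then-flush pattern the event loop depends on, here is a rough sketch in Python; the field names and the 200 ms window are assumptions, not the crate's actual implementation.

class SwipeTracking:
    # Accumulate axis deltas for one direction; report the total when the swipe ends.
    WINDOW_USEC = 200_000  # assumed cutoff: swipes slower than this do not count

    def __init__(self):
        self.start_usec = None
        self.total = 0.0

    def measure_event(self, t_usec, delta):
        # Remember when the swipe started and keep a running total of its deltas.
        if self.start_usec is None:
            self.start_usec = t_usec
        self.total += delta

    def flush(self, t_end_usec):
        # Return the accumulated delta if a swipe was in progress, else None.
        if self.start_usec is None:
            return None
        total, start = self.total, self.start_usec
        self.start_usec, self.total = None, 0.0
        if t_end_usec - start > self.WINDOW_USEC:
            return None  # too slow to be a deliberate swipe
        return total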
_io.py
import sys import time import signal import subprocess from ._utils import get_ffmpeg_exe, logger from ._parsing import LogCatcher, parse_ffmpeg_header, cvsecs ISWIN = sys.platform.startswith("win") exe = None def _get_exe(): global exe if exe is None: exe = get_ffmpeg_exe() return exe def count_frames_and_secs(path): """ Get the number of frames and number of seconds for the given video file. Note that this operation can be quite slow for large files. Disclaimer: I've seen this produce different results from actually reading the frames with older versions of ffmpeg (2.x). Therefore I cannot say with 100% certainty that the returned values are always exact. """ # https://stackoverflow.com/questions/2017843/fetch-frame-count-with-ffmpeg assert isinstance(path, str), "Video path must be a string" cmd = [_get_exe(), "-i", path, "-map", "0:v:0", "-c", "copy", "-f", "null", "-"] try: out = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=ISWIN) except subprocess.CalledProcessError as err: out = err.output.decode(errors="ignore") raise RuntimeError("FFMPEG call failed with {}:\n{}".format(err.returncode, out)) # Note that other than with the subprocess calls below, ffmpeg won't hang here. # Worst case Python will stop/crash and ffmpeg will continue running until done. nframes = nsecs = None for line in reversed(out.splitlines()): if line.startswith(b"frame="): line = line.decode(errors="ignore") i = line.find("frame=") if i >= 0: s = line[i:].split("=", 1)[-1].lstrip().split(" ", 1)[0].strip() nframes = int(s) i = line.find("time=") if i >= 0: s = line[i:].split("=", 1)[-1].lstrip().split(" ", 1)[0].strip() nsecs = cvsecs(*s.split(":")) return nframes, nsecs raise RuntimeError("Could not get number of frames") # pragma: no cover def read_frames(path, pix_fmt="rgb24", bpp=3, input_params=None, output_params=None): """ Create a generator to iterate over the frames in a video file. It first yields a small metadata dictionary that contains: * ffmpeg_version: the ffmpeg version in use (as a string). * codec: a hint about the codec used to encode the video, e.g. "h264" * source_size: the width and height of the encoded video frames * size: the width and height of the frames that will be produced * fps: the frames per second. Can be zero if it could not be detected. * duration: duration in seconds. Can be zero if it could not be detected. After that, it yields frames until the end of the video is reached. Each frame is a bytes object. This function makes no assumptions about the number of frames in the data. For one because this is hard to predict exactly, but also because it may depend on the provided output_params. If you want to know the number of frames in a video file, use count_frames_and_secs(). It is also possible to estimate the number of frames from the fps and duration, but note that even if both numbers are present, the resulting value is not always correct. Example: gen = read_frames(path) meta = gen.__next__() for frame in gen: print(len(frame)) Parameters: path (str): the file to read from. pix_fmt (str): the pixel format of the frames to be read. The default is "rgb24" (frames are uint8 RGB images). bpp (int): The number of bytes per pixel in the output frames. This depends on the given pix_fmt. Default is 3 (RGB). input_params (list): Additional ffmpeg input command line parameters. output_params (list): Additional ffmpeg output command line parameters. """ # ----- Input args assert isinstance(path, str), "Video path must be a string" # Note: Don't check whether it exists.
The source could be e.g. a camera. pix_fmt = pix_fmt or "rgb24" bpp = bpp or 3 input_params = input_params or [] output_params = output_params or [] assert isinstance(pix_fmt, str), "pix_fmt must be a string" assert isinstance(bpp, int), "bpp must be an int" assert isinstance(input_params, list), "input_params must be a list" assert isinstance(output_params, list), "output_params must be a list" # ----- Prepare pre_output_params = ["-pix_fmt", pix_fmt, "-vcodec", "rawvideo", "-f", "image2pipe"] cmd = [_get_exe()] cmd += input_params + ["-i", path] cmd += pre_output_params + output_params + ["-"] # Keep cmd as a list: joining it into a single string would break on # platforms where shell is False (i.e. everything but Windows). p = subprocess.Popen( cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=ISWIN, ) log_catcher = LogCatcher(p.stderr) try: # ----- Load meta data # Wait for the log catcher to get the meta information etime = time.time() + 10.0 while (not log_catcher.header) and time.time() < etime: time.sleep(0.01) # Check whether we have the information if not log_catcher.header: err2 = log_catcher.get_text(0.2) fmt = "Could not load meta information\n=== stderr ===\n{}" raise IOError(fmt.format(err2)) elif "No such file or directory" in log_catcher.header: raise IOError("{} not found! Wrong path?".format(path)) meta = parse_ffmpeg_header(log_catcher.header) yield meta # ----- Read frames w, h = meta["size"] framesize = w * h * bpp framenr = 0 while True: framenr += 1 try: bb = bytes() while len(bb) < framesize: extra_bytes = p.stdout.read(framesize - len(bb)) if not extra_bytes: if len(bb) == 0: return else: raise RuntimeError( "End of file reached before full frame could be read." ) bb += extra_bytes yield bb except Exception as err: err1 = str(err) err2 = log_catcher.get_text(0.4) fmt = "Could not read frame {}:\n{}\n=== stderr ===\n{}" raise RuntimeError(fmt.format(framenr, err1, err2)) finally: # Generators are automatically closed when they get deleted, # so this code is almost guaranteed to run. if p.poll() is None: # Ask ffmpeg to quit try: if True: p.communicate(b"q") else: # pragma: no cover # I read somewhere that modern ffmpeg on Linux prefers a # "ctrl-c", but tests so far suggest sending q is better. p.send_signal(signal.SIGINT) except Exception as err: # pragma: no cover logger.warning("Error while attempting to stop ffmpeg: " + str(err))
etime = time.time() + 1.5 while time.time() < etime and p.poll() is None: time.sleep(0.01) # Grr, we have to kill it if p.poll() is None: # pragma: no cover logger.warning("We had to kill ffmpeg to stop it.") p.kill() def write_frames( path, size, pix_fmt_in="rgb24", pix_fmt_out="yuv420p", fps=16, quality=5, bitrate=None, codec=None, macro_block_size=16, ffmpeg_log_level="warning", ffmpeg_timeout=20.0, input_params=None, output_params=None, ): """ Create a generator to write frames (bytes objects) into a video file. The frames are written by using the generator's `send()` method. Frames can be anything that can be written to a file. Typically these are bytes objects, but c-contiguous Numpy arrays also work. Example: gen = write_frames(path, size) gen.send(None) # seed the generator for frame in frames: gen.send(frame) gen.close() # don't forget this Parameters: path (str): the file to write to. size (tuple): the width and height of the frames. pix_fmt_in (str): the pixel format of incoming frames. E.g. "gray", "gray8a", "rgb24", or "rgba". Default "rgb24". pix_fmt_out (str): the pixel format to store frames. Default "yuv420p". fps (float): The frames per second. Default 16. quality (float): A measure for quality between 1 and 10. Default 5. Ignored if bitrate is given. bitrate (str): The bitrate, e.g. "192k". The defaults are pretty good. codec (str): The codec. Default "libx264" (or "msmpeg4" for .wmv). macro_block_size (int): You probably want to align the size of frames to this value to avoid image resizing. Default 16. Can be set to 1 to avoid block alignment, though this is not recommended. ffmpeg_log_level (str): The ffmpeg logging level. Default "warning". ffmpeg_timeout (float): Timeout in seconds to wait for ffmpeg process to finish. Value of 0 will wait forever. The time that ffmpeg needs depends on CPU speed, compression, and frame size. Default 20.0. input_params (list): Additional ffmpeg input command line parameters. output_params (list): Additional ffmpeg output command line parameters. """ # ----- Input args assert isinstance(path, str), "Video path must be a string" # The pix_fmt_out yuv420p is the best for the output to work in # QuickTime and most other players. These players only support # the YUV planar color space with 4:2:0 chroma subsampling for # H.264 video. Otherwise, depending on the source, ffmpeg may # output to a pixel format that may be incompatible with these # players. See https://trac.ffmpeg.org/wiki/Encode/H.264#Encodingfordumbplayers pix_fmt_in = pix_fmt_in or "rgb24" pix_fmt_out = pix_fmt_out or "yuv420p" fps = fps or 16 quality = quality or 5 # bitrate, codec, macro_block_size can all be None or ...
macro_block_size = macro_block_size or 16 ffmpeg_log_level = ffmpeg_log_level or "warning" input_params = input_params or [] output_params = output_params or [] floatish = float, int if isinstance(size, (tuple, list)): assert len(size) == 2, "size must be a 2-tuple" assert isinstance(size[0], int) and isinstance( size[1], int ), "size must be ints" sizestr = "{:d}x{:d}".format(*size) # elif isinstance(size, str): # assert "x" in size, "size as string must have format NxM" # sizestr = size else: assert False, "size must be str or tuple" assert isinstance(pix_fmt_in, str), "pix_fmt_in must be str" assert isinstance(pix_fmt_out, str), "pix_fmt_out must be str" assert isinstance(fps, floatish), "fps must be float" assert isinstance(quality, floatish), "quality must be float" assert 1 <= quality <= 10, "quality must be between 1 and 10 inclusive" assert isinstance(macro_block_size, int), "macro_block_size must be int" assert isinstance(ffmpeg_log_level, str), "ffmpeg_log_level must be str" assert isinstance(ffmpeg_timeout, floatish), "ffmpeg_timeout must be float" assert isinstance(input_params, list), "input_params must be a list" assert isinstance(output_params, list), "output_params must be a list" # ----- Prepare # Get parameters default_codec = "libx264" if path.lower().endswith(".wmv"): # This is a safer default codec on windows to get videos that # will play in powerpoint and other apps. H264 is not always # available on windows. default_codec = "msmpeg4" codec = codec or default_codec # Get command cmd = [_get_exe(), "-y", "-f", "rawvideo", "-vcodec", "rawvideo", "-s", sizestr] cmd += ["-pix_fmt", pix_fmt_in, "-r", "{:.02f}".format(fps)] + input_params cmd += ["-i", "-"] cmd += ["-an", "-vcodec", codec, "-pix_fmt", pix_fmt_out] # Add fixed bitrate or variable bitrate compression flags if bitrate is not None: cmd += ["-b:v", str(bitrate)] elif quality is not None: # If None, then we don't add anything quality = 1 - quality / 10.0 if codec == "libx264": # crf ranges 0 to 51, 51 being worst. quality = int(quality * 51) cmd += ["-crf", str(quality)] # for h264 else: # Many codecs accept q:v # q:v range can vary, 1-31, 31 being worst # But q:v does not always have the same range. # May need a way to find range for any codec. quality = int(quality * 30) + 1 cmd += ["-qscale:v", str(quality)] # for others # Note, for most codecs, the image dimensions must be divisible by # 16; the default for the macro_block_size is 16. Check if image is # divisible; if not, have ffmpeg upsize to nearest size and warn # user they should correct input image if this is not desired. if macro_block_size > 1: if size[0] % macro_block_size > 0 or size[1] % macro_block_size > 0: out_w = size[0] out_h = size[1] if size[0] % macro_block_size > 0: out_w += macro_block_size - (size[0] % macro_block_size) if size[1] % macro_block_size > 0: out_h += macro_block_size - (size[1] % macro_block_size) cmd += ["-vf", "scale={}:{}".format(out_w, out_h)] logger.warning( "IMAGEIO FFMPEG_WRITER WARNING: input image is not" " divisible by macro_block_size={}, resizing from {} " "to {} to ensure video compatibility with most codecs " "and players. To prevent resizing, make your input " "image divisible by the macro_block_size or set the " "macro_block_size to 1 (risking incompatibility).".format( macro_block_size, size[:2], (out_w, out_h) ) ) # Rather than redirect stderr to a pipe, just set minimal # output from ffmpeg by default. That way if there are warnings # the user will see them.
cmd += ["-v", ffmpeg_log_level] cmd += output_params cmd.append(path) cmd_str = " ".join(cmd) if any( [level in ffmpeg_log_level for level in ("info", "verbose", "debug", "trace")] ): logger.info("RUNNING FFMPEG COMMAND: " + cmd_str) # Launch process p = subprocess.Popen( cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=None, shell=ISWIN ) # For Windows, set `shell=True` in sp.Popen to prevent popup # of a command line window in frozen applications. # Note that directing stderr to a pipe on windows will cause ffmpeg # to hang if the buffer is not periodically cleared using # StreamCatcher or other means. # Setting bufsize to 0 or a small value does not seem to have much effect # (at least on Windows). I suspect that ffmpeg buffers # multiple frames # (before encoding in a batch). # ----- Write frames try: # Just keep going until the generator.close() is called (raises GeneratorExit). # This could also happen when the generator is deleted somehow. nframes = 0 while True: # Get frame bb = (yield) # framesize = size[0] * size[1] * depth * bpp # assert isinstance(bb, bytes), "Frame must be send as bytes" # assert len(bb) == framesize, "Frame must have width*height*depth*bpp bytes" # Actually, we accept anything that can be written to file. # This e.g. allows writing numpy arrays without having to make a copy ... # Write try: p.stdin.write(bb) except Exception as err: # Show the command and stderr from pipe msg = ( "{0:}\n\nFFMPEG COMMAND:\n{1:}\n\nFFMPEG STDERR " "OUTPUT:\n".format(err, cmd_str) ) raise IOError(msg) nframes += 1 except GeneratorExit: if nframes == 0: logger.warning("No frames have been written; the written video is invalid.") finally: if p.poll() is None: # Ask ffmpeg to quit - and wait for it to finish writing the file. # Depending on the frame size and encoding this can take a few # seconds (sometimes 10-20). Since a user may get bored and hit # Ctrl-C, we wrap this in a try-except. waited = False try: try: p.stdin.close() except Exception: # pragma: no cover pass etime = time.time() + ffmpeg_timeout while (not ffmpeg_timeout or time.time() < etime) and p.poll() is None: time.sleep(0.01) waited = True finally: # Grr, we have to kill it if p.poll() is None: # pragma: no cover more = " Consider increasing ffmpeg_timeout." if waited else "" logger.warning("We had to kill ffmpeg to stop it." + more) p.kill()
# Wait for it to stop
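Putting read_frames and write_frames together gives a minimal transcode loop; a sketch with placeholder file names, taking the output geometry from the input's metadata:

reader = read_frames("input.mp4")
meta = next(reader)  # the first yield is the metadata dict
writer = write_frames("output.mp4", size=meta["size"], fps=meta["fps"] or 16)
writer.send(None)  # seed the generator
for frame in reader:
    writer.send(frame)
writer.close()  # flush and wait for ffmpeg to finish the file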
lib.rs
use ansi_term::Color; use bigdecimal::BigDecimal; use codespan_reporting::diagnostic::{Diagnostic, Label}; use derive_new::new; use getset::Getters; use nu_source::{b, DebugDocBuilder, HasFallibleSpan, PrettyDebug, Span, Spanned, SpannedItem}; use num_bigint::BigInt; use num_traits::ToPrimitive; use serde::{Deserialize, Serialize}; use std::fmt; use std::ops::Range; /// A structured reason for a ParseError. Note that parsing in nu is more like macro expansion in /// other languages, so the kinds of errors that can occur during parsing are more contextual than /// you might expect. #[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord, Hash, Serialize, Deserialize)] pub enum ParseErrorReason { /// The parser encountered an EOF rather than what it was expecting Eof { expected: String, span: Span }, /// The parser expected to see the end of a token stream (possibly the token /// stream from inside a delimited token node), but found something else. ExtraTokens { actual: Spanned<String> }, /// The parser encountered something other than what it was expecting Mismatch { expected: String, actual: Spanned<String>, }, /// An unexpected internal error has occurred InternalError { message: Spanned<String> }, /// The parser tried to parse an argument for a command, but it failed for /// some reason ArgumentError { command: Spanned<String>, error: ArgumentError, }, } /// A newtype for `ParseErrorReason` #[derive(Debug, Clone, Getters, PartialEq, PartialOrd, Eq, Ord, Hash, Serialize, Deserialize)] pub struct ParseError { #[get = "pub"] reason: ParseErrorReason, } impl ParseError { /// Construct a [ParseErrorReason::Eof](ParseErrorReason::Eof) pub fn unexpected_eof(expected: impl Into<String>, span: Span) -> ParseError { ParseError { reason: ParseErrorReason::Eof { expected: expected.into(), span, }, } } /// Construct a [ParseErrorReason::ExtraTokens](ParseErrorReason::ExtraTokens) pub fn extra_tokens(actual: Spanned<impl Into<String>>) -> ParseError { let Spanned { span, item } = actual; ParseError { reason: ParseErrorReason::ExtraTokens { actual: item.into().spanned(span), }, } } /// Construct a [ParseErrorReason::Mismatch](ParseErrorReason::Mismatch) pub fn mismatch(expected: impl Into<String>, actual: Spanned<impl Into<String>>) -> ParseError { let Spanned { span, item } = actual; ParseError { reason: ParseErrorReason::Mismatch { expected: expected.into(), actual: item.into().spanned(span), }, } } /// Construct a [ParseErrorReason::InternalError](ParseErrorReason::InternalError) pub fn internal_error(message: Spanned<impl Into<String>>) -> ParseError { ParseError { reason: ParseErrorReason::InternalError { message: message.item.into().spanned(message.span), }, } } /// Construct a [ParseErrorReason::ArgumentError](ParseErrorReason::ArgumentError) pub fn argument_error(command: Spanned<impl Into<String>>, kind: ArgumentError) -> ParseError { ParseError { reason: ParseErrorReason::ArgumentError { command: command.item.into().spanned(command.span), error: kind, }, } } } /// Convert a [ParseError](ParseError) into a [ShellError](ShellError) impl From<ParseError> for ShellError { fn from(error: ParseError) -> ShellError { match error.reason { ParseErrorReason::Eof { expected, span } => ShellError::unexpected_eof(expected, span), ParseErrorReason::ExtraTokens { actual } => ShellError::type_error("nothing", actual), ParseErrorReason::Mismatch { actual, expected } => { ShellError::type_error(expected, actual) } ParseErrorReason::InternalError { message } => ShellError::labeled_error( format!("Internal error: 
{}", message.item), &message.item, &message.span, ), ParseErrorReason::ArgumentError { command, error } => { ShellError::argument_error(command, error) } } } } /// ArgumentError describes various ways that the parser could fail because of unexpected arguments. /// Nu commands are like a combination of functions and macros, and these errors correspond to /// problems that could be identified during expansion based on the syntactic signature of a /// command. #[derive(Debug, Eq, PartialEq, Clone, Ord, Hash, PartialOrd, Serialize, Deserialize)] pub enum ArgumentError { /// The command specified a mandatory flag, but it was missing. MissingMandatoryFlag(String), /// The command specified a mandatory positional argument, but it was missing. MissingMandatoryPositional(String), /// A flag was found, and it should have been followed by a value, but no value was found MissingValueForName(String), /// An argument was found, but the command does not recognize it UnexpectedArgument(Spanned<String>), /// An flag was found, but the command does not recognize it UnexpectedFlag(Spanned<String>), /// A sequence of characters was found that was not syntactically valid (but would have /// been valid if the command was an external command) InvalidExternalWord, } impl PrettyDebug for ArgumentError { fn pretty(&self) -> DebugDocBuilder { match self { ArgumentError::MissingMandatoryFlag(flag) => { b::description("missing `") + b::description(flag) + b::description("` as mandatory flag") } ArgumentError::UnexpectedArgument(name) => { b::description("unexpected `") + b::description(&name.item) + b::description("` is not supported") } ArgumentError::UnexpectedFlag(name) => { b::description("unexpected `") + b::description(&name.item) + b::description("` is not supported") } ArgumentError::MissingMandatoryPositional(pos) => { b::description("missing `") + b::description(pos) + b::description("` as mandatory positional argument") } ArgumentError::MissingValueForName(name) => { b::description("missing value for flag `") + b::description(name) + b::description("`") } ArgumentError::InvalidExternalWord => b::description("invalid word"), } } } /// A `ShellError` is a proximate error and a possible cause, which could have its own cause, /// creating a cause chain. #[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Clone, Serialize, Deserialize, Hash)] pub struct ShellError { pub error: ProximateShellError, pub cause: Option<Box<ShellError>>, } /// `PrettyDebug` is for internal debugging. For user-facing debugging, [into_diagnostic](ShellError::into_diagnostic) /// is used, which prints an error, highlighting spans. impl PrettyDebug for ShellError { fn pretty(&self) -> DebugDocBuilder { match &self.error { ProximateShellError::SyntaxError { problem } => { b::error("Syntax Error") + b::space() + b::delimit("(", b::description(&problem.item), ")") } ProximateShellError::UnexpectedEof { .. 
} => b::error("Unexpected end"), ProximateShellError::TypeError { expected, actual } => { b::error("Type Error") + b::space() + b::delimit( "(", b::description("expected:") + b::space() + b::description(expected) + b::description(",") + b::space() + b::description("actual:") + b::space() + b::option(actual.item.as_ref().map(b::description)), ")", ) } ProximateShellError::MissingProperty { subpath, expr } => { b::error("Missing Property") + b::space() + b::delimit( "(", b::description("expr:") + b::space() + b::description(&expr.item) + b::description(",") + b::space() + b::description("subpath:") + b::space() + b::description(&subpath.item), ")", ) } ProximateShellError::InvalidIntegerIndex { subpath, .. } => { b::error("Invalid integer index") + b::space() + b::delimit( "(", b::description("subpath:") + b::space() + b::description(&subpath.item), ")", ) } ProximateShellError::MissingValue { reason, .. } => { b::error("Missing Value") + b::space() + b::delimit( "(", b::description("reason:") + b::space() + b::description(reason), ")", ) } ProximateShellError::ArgumentError { command, error } => { b::error("Argument Error") + b::space() + b::delimit( "(", b::description("command:") + b::space() + b::description(&command.item) + b::description(",") + b::space() + b::description("error:") + b::space() + error.pretty(), ")", ) } ProximateShellError::RangeError { kind, actual_kind, operation, } => { b::error("Range Error") + b::space() + b::delimit( "(", b::description("expected:") + b::space() + kind.pretty() + b::description(",") + b::space() + b::description("actual:") + b::space() + b::description(&actual_kind.item) + b::description(",") + b::space() + b::description("operation:") + b::space() + b::description(operation), ")", ) } ProximateShellError::Diagnostic(_) => b::error("diagnostic"), ProximateShellError::CoerceError { left, right } => { b::error("Coercion Error") + b::space() + b::delimit( "(", b::description("left:") + b::space() + b::description(&left.item) + b::description(",") + b::space() + b::description("right:") + b::space() + b::description(&right.item), ")", ) } ProximateShellError::UntaggedRuntimeError { reason } => { b::error("Unknown Error") + b::delimit("(", b::description(reason), ")") } ProximateShellError::ExternalPlaceholderError => { b::error("non-zero external exit code") } } } } impl std::fmt::Display for ShellError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}", self.pretty().display()) } } impl serde::de::Error for ShellError { fn custom<T>(msg: T) -> Self where T: std::fmt::Display, { ShellError::untagged_runtime_error(msg.to_string()) } } impl ShellError { /// An error that describes a mismatch between the given type and the expected type pub fn type_error( expected: impl Into<String>, actual: Spanned<impl Into<String>>, ) -> ShellError { ProximateShellError::TypeError { expected: expected.into(), actual: actual.map(|i| Some(i.into())), } .start() } pub fn missing_property( subpath: Spanned<impl Into<String>>, expr: Spanned<impl Into<String>>, ) -> ShellError
pub fn invalid_integer_index( subpath: Spanned<impl Into<String>>, integer: impl Into<Span>, ) -> ShellError { ProximateShellError::InvalidIntegerIndex { subpath: subpath.map(|s| s.into()), integer: integer.into(), } .start() } pub fn untagged_runtime_error(error: impl Into<String>) -> ShellError { ProximateShellError::UntaggedRuntimeError { reason: error.into(), } .start() } pub fn unexpected_eof(expected: impl Into<String>, span: impl Into<Span>) -> ShellError { ProximateShellError::UnexpectedEof { expected: expected.into(), span: span.into(), } .start() } pub fn range_error( expected: impl Into<ExpectedRange>, actual: &Spanned<impl fmt::Debug>, operation: impl Into<String>, ) -> ShellError { ProximateShellError::RangeError { kind: expected.into(), actual_kind: format!("{:?}", actual.item).spanned(actual.span), operation: operation.into(), } .start() } pub fn syntax_error(problem: Spanned<impl Into<String>>) -> ShellError { ProximateShellError::SyntaxError { problem: problem.map(|p| p.into()), } .start() } pub fn coerce_error( left: Spanned<impl Into<String>>, right: Spanned<impl Into<String>>, ) -> ShellError { ProximateShellError::CoerceError { left: left.map(|l| l.into()), right: right.map(|r| r.into()), } .start() } pub fn argument_error(command: Spanned<impl Into<String>>, kind: ArgumentError) -> ShellError { ProximateShellError::ArgumentError { command: command.map(|c| c.into()), error: kind, } .start() } pub fn diagnostic(diagnostic: Diagnostic<usize>) -> ShellError { ProximateShellError::Diagnostic(ShellDiagnostic { diagnostic }).start() } pub fn external_non_zero() -> ShellError { ProximateShellError::ExternalPlaceholderError.start() } pub fn into_diagnostic(self) -> Option<Diagnostic<usize>> { match self.error { ProximateShellError::MissingValue { span, reason } => { let mut d = Diagnostic::bug().with_message(format!("Internal Error (missing value) :: {}", reason)); if let Some(span) = span { d = d.with_labels(vec![Label::primary(0, span)]); } Some(d) } ProximateShellError::ArgumentError { command, error, } => Some(match error { ArgumentError::InvalidExternalWord => Diagnostic::error().with_message("Invalid bare word for Nu command (did you intend to invoke an external command?)") .with_labels(vec![Label::primary(0, command.span)]), ArgumentError::UnexpectedArgument(argument) => Diagnostic::error().with_message( format!( "{} unexpected {}", Color::Cyan.paint(&command.item), Color::Green.bold().paint(&argument.item) ) ) .with_labels( vec![Label::primary(0, argument.span).with_message( format!("unexpected argument (try {} -h)", &command.item))] ), ArgumentError::UnexpectedFlag(flag) => Diagnostic::error().with_message( format!( "{} unexpected {}", Color::Cyan.paint(&command.item), Color::Green.bold().paint(&flag.item) ), ) .with_labels(vec![ Label::primary(0, flag.span).with_message( format!("unexpected flag (try {} -h)", &command.item)) ]), ArgumentError::MissingMandatoryFlag(name) => Diagnostic::error().with_message( format!( "{} requires {}{}", Color::Cyan.paint(&command.item), Color::Green.bold().paint("--"), Color::Green.bold().paint(name) ), ) .with_labels(vec![Label::primary(0, command.span)]), ArgumentError::MissingMandatoryPositional(name) => Diagnostic::error().with_message( format!( "{} requires {} parameter", Color::Cyan.paint(&command.item), Color::Green.bold().paint(name.clone()) ), ) .with_labels( vec![Label::primary(0, command.span).with_message(format!("requires {} parameter", name))], ), ArgumentError::MissingValueForName(name) => 
Diagnostic::error().with_message( format!( "{} is missing value for flag {}{}", Color::Cyan.paint(&command.item), Color::Green.bold().paint("--"), Color::Green.bold().paint(name) ), ) .with_labels(vec![Label::primary(0, command.span)]), }), ProximateShellError::TypeError { expected, actual: Spanned { item: Some(actual), span, }, } => Some(Diagnostic::error().with_message("Type Error").with_labels( vec![Label::primary(0, span) .with_message(format!("Expected {}, found {}", expected, actual))]), ), ProximateShellError::TypeError { expected, actual: Spanned { item: None, span }, } => Some(Diagnostic::error().with_message("Type Error") .with_labels(vec![Label::primary(0, span).with_message(expected)])), ProximateShellError::UnexpectedEof { expected, span } => Some(Diagnostic::error().with_message("Unexpected end of input") .with_labels(vec![Label::primary(0, span).with_message(format!("Expected {}", expected))])), ProximateShellError::RangeError { kind, operation, actual_kind: Spanned { item, span }, } => Some(Diagnostic::error().with_message("Range Error").with_labels( vec![Label::primary(0, span).with_message(format!( "Expected to convert {} to {} while {}, but it was out of range", item, kind.display(), operation ))]), ), ProximateShellError::SyntaxError { problem: Spanned { span, item }, } => Some(Diagnostic::error().with_message("Syntax Error") .with_labels(vec![Label::primary(0, span).with_message(item)])), ProximateShellError::MissingProperty { subpath, expr, .. } => { let mut diag = Diagnostic::error().with_message("Missing property"); if subpath.span == Span::unknown() { diag.message = format!("Missing property (for {})", subpath.item); } else { let subpath = Label::primary(0, subpath.span).with_message(subpath.item); let mut labels = vec![]; labels.push(subpath); if expr.span != Span::unknown() { let expr = Label::primary(0, expr.span).with_message(expr.item); labels.push(expr); } diag = diag.with_labels(labels); } Some(diag) } ProximateShellError::InvalidIntegerIndex { subpath,integer } => { let mut diag = Diagnostic::error().with_message("Invalid integer property"); let mut labels = vec![]; if subpath.span == Span::unknown() { diag.message = format!("Invalid integer property (for {})", subpath.item) } else { let label = Label::primary(0, subpath.span).with_message(subpath.item); labels.push(label); } labels.push(Label::secondary(0, integer).with_message("integer")); diag = diag.with_labels(labels); Some(diag) } ProximateShellError::Diagnostic(diag) => Some(diag.diagnostic), ProximateShellError::CoerceError { left, right } => { Some(Diagnostic::error().with_message("Coercion error") .with_labels(vec![Label::primary(0, left.span).with_message(left.item), Label::secondary(0, right.span).with_message(right.item)])) } ProximateShellError::UntaggedRuntimeError { reason } => Some(Diagnostic::error().with_message(format!("Error: {}", reason))), ProximateShellError::ExternalPlaceholderError => None, } } pub fn labeled_error( msg: impl Into<String>, label: impl Into<String>, span: impl Into<Span>, ) -> ShellError { ShellError::diagnostic( Diagnostic::error() .with_message(msg.into()) .with_labels(vec![ Label::primary(0, span.into()).with_message(label.into()) ]), ) } pub fn labeled_error_with_secondary( msg: impl Into<String>, primary_label: impl Into<String>, primary_span: impl Into<Span>, secondary_label: impl Into<String>, secondary_span: impl Into<Span>, ) -> ShellError { ShellError::diagnostic( Diagnostic::error() .with_message(msg.into()) .with_labels(vec![ Label::primary(0, 
primary_span.into()).with_message(primary_label.into()), Label::secondary(0, secondary_span.into()).with_message(secondary_label.into()), ]), ) } pub fn unimplemented(title: impl Into<String>) -> ShellError { ShellError::untagged_runtime_error(&format!("Unimplemented: {}", title.into())) } pub fn unexpected(title: impl Into<String>) -> ShellError { ShellError::untagged_runtime_error(&format!("Unexpected: {}", title.into())) } } /// `ExpectedRange` describes a range of values that was expected by a command. In addition /// to typical ranges, this enum allows an error to specify that the range of allowed values /// corresponds to a particular numeric type (which is a dominant use-case for the /// [RangeError](ProximateShellError::RangeError) error type). #[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Clone, Serialize, Deserialize)] pub enum ExpectedRange { I8, I16, I32, I64, I128, U8, U16, U32, U64, U128, F32, F64, Usize, Size, BigInt, BigDecimal, Range { start: usize, end: usize }, } /// Convert a Rust range into an [ExpectedRange](ExpectedRange). impl From<Range<usize>> for ExpectedRange { fn from(range: Range<usize>) -> Self { ExpectedRange::Range { start: range.start, end: range.end, } } } impl PrettyDebug for ExpectedRange { fn pretty(&self) -> DebugDocBuilder { b::description(match self { ExpectedRange::I8 => "an 8-bit signed integer", ExpectedRange::I16 => "a 16-bit signed integer", ExpectedRange::I32 => "a 32-bit signed integer", ExpectedRange::I64 => "a 64-bit signed integer", ExpectedRange::I128 => "a 128-bit signed integer", ExpectedRange::U8 => "an 8-bit unsigned integer", ExpectedRange::U16 => "a 16-bit unsigned integer", ExpectedRange::U32 => "a 32-bit unsigned integer", ExpectedRange::U64 => "a 64-bit unsigned integer", ExpectedRange::U128 => "a 128-bit unsigned integer", ExpectedRange::F32 => "a 32-bit float", ExpectedRange::F64 => "a 64-bit float", ExpectedRange::Usize => "a list index", ExpectedRange::Size => "a list offset", ExpectedRange::BigDecimal => "a decimal", ExpectedRange::BigInt => "an integer", ExpectedRange::Range { start, end } => { return b::description(format!("{} to {}", start, end)) } }) } } #[derive(Debug, Eq, PartialEq, Clone, Ord, PartialOrd, Serialize, Deserialize, Hash)] pub enum ProximateShellError { SyntaxError { problem: Spanned<String>, }, UnexpectedEof { expected: String, span: Span, }, TypeError { expected: String, actual: Spanned<Option<String>>, }, MissingProperty { subpath: Spanned<String>, expr: Spanned<String>, }, InvalidIntegerIndex { subpath: Spanned<String>, integer: Span, }, MissingValue { span: Option<Span>, reason: String, }, ArgumentError { command: Spanned<String>, error: ArgumentError, }, RangeError { kind: ExpectedRange, actual_kind: Spanned<String>, operation: String, }, Diagnostic(ShellDiagnostic), CoerceError { left: Spanned<String>, right: Spanned<String>, }, UntaggedRuntimeError { reason: String, }, ExternalPlaceholderError, } impl ProximateShellError { fn start(self) -> ShellError { ShellError { cause: None, error: self, } } } impl HasFallibleSpan for ShellError { fn maybe_span(&self) -> Option<Span> { self.error.maybe_span() } } impl HasFallibleSpan for ProximateShellError { fn maybe_span(&self) -> Option<Span> { Some(match self { ProximateShellError::SyntaxError { problem } => problem.span, ProximateShellError::UnexpectedEof { span, .. } => *span, ProximateShellError::TypeError { actual, .. } => actual.span, ProximateShellError::MissingProperty { subpath, ..
} => subpath.span, ProximateShellError::InvalidIntegerIndex { subpath, .. } => subpath.span, ProximateShellError::MissingValue { span, .. } => return *span, ProximateShellError::ArgumentError { command, .. } => command.span, ProximateShellError::RangeError { actual_kind, .. } => actual_kind.span, ProximateShellError::Diagnostic(_) => return None, ProximateShellError::CoerceError { left, right } => left.span.until(right.span), ProximateShellError::UntaggedRuntimeError { .. } => return None, ProximateShellError::ExternalPlaceholderError => return None, }) } } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct ShellDiagnostic { pub diagnostic: Diagnostic<usize>, } impl std::hash::Hash for ShellDiagnostic { fn hash<H: std::hash::Hasher>(&self, state: &mut H) { self.diagnostic.severity.hash(state); self.diagnostic.code.hash(state); self.diagnostic.message.hash(state); for label in &self.diagnostic.labels { label.range.hash(state); label.message.hash(state); match label.style { codespan_reporting::diagnostic::LabelStyle::Primary => 0.hash(state), codespan_reporting::diagnostic::LabelStyle::Secondary => 1.hash(state), } } } } impl PartialEq for ShellDiagnostic { fn eq(&self, _other: &ShellDiagnostic) -> bool { false } } impl Eq for ShellDiagnostic {} impl std::cmp::PartialOrd for ShellDiagnostic { fn partial_cmp(&self, _other: &Self) -> Option<std::cmp::Ordering> { Some(std::cmp::Ordering::Less) } } impl std::cmp::Ord for ShellDiagnostic { fn cmp(&self, _other: &Self) -> std::cmp::Ordering { std::cmp::Ordering::Less } } #[derive(Debug, Ord, PartialOrd, Eq, PartialEq, new, Clone, Serialize, Deserialize)] pub struct StringError { title: String, error: String, } impl std::error::Error for ShellError {} impl std::convert::From<Box<dyn std::error::Error>> for ShellError { fn from(input: Box<dyn std::error::Error>) -> ShellError { ShellError::untagged_runtime_error(format!("{}", input)) } } impl std::convert::From<std::io::Error> for ShellError { fn from(input: std::io::Error) -> ShellError { ShellError::untagged_runtime_error(format!("{}", input)) } } impl std::convert::From<serde_yaml::Error> for ShellError { fn from(input: serde_yaml::Error) -> ShellError { ShellError::untagged_runtime_error(format!("{:?}", input)) } } impl std::convert::From<toml::ser::Error> for ShellError { fn from(input: toml::ser::Error) -> ShellError { ShellError::untagged_runtime_error(format!("{:?}", input)) } } impl std::convert::From<serde_json::Error> for ShellError { fn from(input: serde_json::Error) -> ShellError { ShellError::untagged_runtime_error(format!("{:?}", input)) } } impl std::convert::From<Box<dyn std::error::Error + Send + Sync>> for ShellError { fn from(input: Box<dyn std::error::Error + Send + Sync>) -> ShellError { ShellError::untagged_runtime_error(format!("{:?}", input)) } } impl std::convert::From<glob::PatternError> for ShellError { fn from(input: glob::PatternError) -> ShellError { ShellError::untagged_runtime_error(format!("{:?}", input)) } } pub trait CoerceInto<U> { fn coerce_into(self, operation: impl Into<String>) -> Result<U, ShellError>; } trait ToExpectedRange { fn to_expected_range() -> ExpectedRange; } macro_rules! 
ranged_int { ($ty:tt -> $op:tt -> $variant:tt) => { impl ToExpectedRange for $ty { fn to_expected_range() -> ExpectedRange { ExpectedRange::$variant } } impl CoerceInto<$ty> for nu_source::Tagged<BigInt> { fn coerce_into(self, operation: impl Into<String>) -> Result<$ty, ShellError> { match self.$op() { Some(v) => Ok(v), None => Err(ShellError::range_error( $ty::to_expected_range(), &self.item.spanned(self.tag.span), operation.into(), )), } } } impl CoerceInto<$ty> for nu_source::Tagged<&BigInt> { fn coerce_into(self, operation: impl Into<String>) -> Result<$ty, ShellError> { match self.$op() { Some(v) => Ok(v), None => Err(ShellError::range_error( $ty::to_expected_range(), &self.item.spanned(self.tag.span), operation.into(), )), } } } }; } ranged_int!(u8 -> to_u8 -> U8); ranged_int!(u16 -> to_u16 -> U16); ranged_int!(u32 -> to_u32 -> U32); ranged_int!(u64 -> to_u64 -> U64); ranged_int!(i8 -> to_i8 -> I8); ranged_int!(i16 -> to_i16 -> I16); ranged_int!(i32 -> to_i32 -> I32); ranged_int!(i64 -> to_i64 -> I64); macro_rules! ranged_decimal { ($ty:tt -> $op:tt -> $variant:tt) => { impl ToExpectedRange for $ty { fn to_expected_range() -> ExpectedRange { ExpectedRange::$variant } } impl CoerceInto<$ty> for nu_source::Tagged<BigDecimal> { fn coerce_into(self, operation: impl Into<String>) -> Result<$ty, ShellError> { match self.$op() { Some(v) => Ok(v), None => Err(ShellError::range_error( $ty::to_expected_range(), &self.item.spanned(self.tag.span), operation.into(), )), } } } impl CoerceInto<$ty> for nu_source::Tagged<&BigDecimal> { fn coerce_into(self, operation: impl Into<String>) -> Result<$ty, ShellError> { match self.$op() { Some(v) => Ok(v), None => Err(ShellError::range_error( $ty::to_expected_range(), &self.item.spanned(self.tag.span), operation.into(), )), } } } }; } ranged_decimal!(f32 -> to_f32 -> F32); ranged_decimal!(f64 -> to_f64 -> F64);
{ ProximateShellError::MissingProperty { subpath: subpath.map(|s| s.into()), expr: expr.map(|e| e.into()), } .start() }
embedder_test.go
package embedder import ( "strconv" "strings" "testing" ) func TestEmbedder(t *testing.T) { hW := []byte("hello, world") embedded, err := Embed("asset", hW) if err != nil { t.Fatal(err) } p1 := strings.Index(string(embedded), "{") p2 := strings.Index(string(embedded), "}") bytesOnly := string(embedded)[p1+1 : p2] bytesOnlySlice := strings.Split(bytesOnly, ",") var hW2 []byte for _, b := range bytesOnlySlice { bb, err := strconv.Atoi(strings.TrimSpace(b)) if err != nil { t.Fatal(err) } hW2 = append(hW2, byte(bb)) } if string(hW2) != "hello, world" { t.Fatal("Should be able to embed an asset") }
t.Log("Succes - embedder") }
__init__.py
from flask import Blueprint
Admin Blueprint """ blueprint = Blueprint( 'admin_blueprint', __name__, url_prefix='/admin' ) from app.api.admin.views import admin
"""
transform.py
# # transform.py -- coordinate transforms for Ginga # # This is open-source software licensed under a BSD license. # Please see the file LICENSE.txt for details. # import numpy as np from ginga import trcalc from ginga.misc import Bunch __all__ = ['TransformError', 'BaseTransform', 'ComposedTransform', 'InvertedTransform', 'PassThruTransform', 'WindowNativeTransform', 'CartesianWindowTransform', 'CartesianNativeTransform', 'RotationTransform', 'ScaleTransform', 'DataCartesianTransform', 'OffsetDataTransform', 'WCSDataTransform', 'get_catalog' ] class TransformError(Exception): pass class BaseTransform(object): def __init__(self): super(BaseTransform, self).__init__() def to_(self, x, y): raise TransformError("subclass should override this method") def from_(self, tx, ty): raise TransformError("subclass should override this method") def __add__(self, trans): return ComposedTransform(self, trans) def invert(self): return InvertedTransform(self) class ComposedTransform(BaseTransform): """ A transform that composes two other transforms to make a new one. """ def __init__(self, tform1, tform2): super(ComposedTransform, self).__init__() self.tform1 = tform1 self.tform2 = tform2 def to_(self, pts, **kwargs): return self.tform2.to_(self.tform1.to_(pts, **kwargs)) def from_(self, pts, **kwargs): return self.tform1.from_(self.tform2.from_(pts), **kwargs) class InvertedTransform(BaseTransform): """ A transform that inverts another transform. """ def __init__(self, tform): super(InvertedTransform, self).__init__() self.tform = tform def to_(self, pts, **kwargs): return self.tform.from_(pts, **kwargs) def from_(self, pts, **kwargs): return self.tform.to_(pts, **kwargs) class PassThruTransform(BaseTransform): """ A transform that essentially acts as a no-op. """ def __init__(self, viewer): super(PassThruTransform, self).__init__() def to_(self, pts, **kwargs): return pts def from_(self, pts, **kwargs): return pts class WindowNativeTransform(BaseTransform): """ A transform from a typical window standard coordinate space with the upper left at (0, 0) to the viewer back end native pixel space. """ def __init__(self, viewer): super(WindowNativeTransform, self).__init__() self.viewer = viewer def to_(self, win_pts): if self.viewer.origin_upper: return win_pts win_pts = np.asarray(win_pts) has_z = (win_pts.shape[-1] > 2) # invert Y coord for backends that have the origin in the lower left win_wd, win_ht = self.viewer.get_window_size() # win_x, win_y = cvs_x, win_ht - cvs_y mpy_pt = [1.0, -1.0] if has_z: mpy_pt.append(1.0) add_pt = [0.0, win_ht] if has_z: add_pt.append(0.0) ntv_pts = np.add(np.multiply(win_pts, mpy_pt), add_pt) return ntv_pts def from_(self, ntv_pts): return self.to_(ntv_pts) class WindowPercentageTransform(BaseTransform): """ A transform from standard window coordinates of a viewer to percentage coordinates. 
""" def __init__(self, viewer, as_int=True): super(WindowPercentageTransform, self).__init__() self.viewer = viewer self.as_int = as_int def to_(self, win_pts): win_pts = np.asarray(win_pts, dtype=np.float) has_z = (win_pts.shape[-1] > 2) max_pt = list(self.viewer.get_window_size()) if has_z: max_pt.append(0.0) pct_pts = np.divide(win_pts, max_pt) return pct_pts def from_(self, pct_pts): """Reverse of :meth:`to_`.""" pct_pts = np.asarray(pct_pts, dtype=np.float) has_z = (pct_pts.shape[-1] > 2) max_pt = list(self.viewer.get_window_size()) if has_z: max_pt.append(0.0) win_pts = np.multiply(pct_pts, max_pt) # round to pixel units, if asked if self.as_int: win_pts = np.rint(win_pts).astype(np.int, copy=False)
return win_pts class CartesianWindowTransform(BaseTransform): """ A transform from cartesian coordinates to standard window coordinates of a viewer. """ def __init__(self, viewer, as_int=True): super(CartesianWindowTransform, self).__init__() self.viewer = viewer self.as_int = as_int def to_(self, off_pts): # add center pixel to convert from X/Y coordinate space to # window graphics space off_pts = np.asarray(off_pts, dtype=np.float) has_z = (off_pts.shape[-1] > 2) ctr_pt = list(self.viewer.get_center()) if has_z: ctr_pt.append(0.0) # win_x = off_x + ctr_x # win_y = ctr_y - off_y mpy_pt = [1.0, -1.0] if has_z: mpy_pt.append(1.0) win_pts = np.add(np.multiply(off_pts, mpy_pt), ctr_pt) # round to pixel units, if asked if self.as_int: win_pts = np.rint(win_pts).astype(np.int, copy=False) return win_pts def from_(self, win_pts): """Reverse of :meth:`to_`.""" # make relative to center pixel to convert from window # graphics space to standard X/Y coordinate space win_pts = np.asarray(win_pts, dtype=np.float) has_z = (win_pts.shape[-1] > 2) ctr_pt = list(self.viewer.get_center()) if has_z: ctr_pt.append(0.0) mpy_pt = [1.0, -1.0] if has_z: mpy_pt.append(1.0) # off_x = win_x - ctr_x # = win_x + -ctr_x # off_y = ctr_y - win_y # = -win_y + ctr_y ctr_pt[0] = -ctr_pt[0] off_pts = np.add(np.multiply(win_pts, mpy_pt), ctr_pt) return off_pts class CartesianNativeTransform(BaseTransform): """ A transform from cartesian coordinates to the native pixel coordinates of a viewer. """ def __init__(self, viewer, as_int=True): super(CartesianNativeTransform, self).__init__() self.viewer = viewer self.as_int = as_int def to_(self, off_pts): # add center pixel to convert from X/Y coordinate space to # back end graphics space off_pts = np.asarray(off_pts, dtype=np.float) has_z = (off_pts.shape[-1] > 2) ctr_pt = list(self.viewer.get_center()) if has_z: ctr_pt.append(0.0) if self.viewer.origin_upper: mpy_pt = [1.0, -1.0] else: mpy_pt = [1.0, 1.0] if has_z: mpy_pt.append(1.0) win_pts = np.add(np.multiply(off_pts, mpy_pt), ctr_pt) # round to pixel units, if asked if self.as_int: win_pts = np.rint(win_pts).astype(np.int, copy=False) return win_pts def from_(self, win_pts): """Reverse of :meth:`to_`.""" # make relative to center pixel to convert from back end # graphics space to standard X/Y coordinate space win_pts = np.asarray(win_pts, dtype=np.float) has_z = (win_pts.shape[-1] > 2) ctr_pt = list(self.viewer.get_center()) if has_z: ctr_pt.append(0.0) ctr_pt[0] = -ctr_pt[0] if self.viewer.origin_upper: mpy_pt = [1.0, -1.0] else: ctr_pt[1] = -ctr_pt[1] mpy_pt = [1.0, 1.0] if has_z: mpy_pt.append(1.0) off_pts = np.add(np.multiply(win_pts, mpy_pt), ctr_pt) return off_pts class RotationTransform(BaseTransform): """ A transform in cartesian coordinates based on the flip/swap setting and rotation setting of a viewer. 
""" def __init__(self, viewer): super(RotationTransform, self).__init__() self.viewer = viewer def to_(self, off_pts): off_pts = np.asarray(off_pts, dtype=np.float) has_z = (off_pts.shape[-1] > 2) t_ = self.viewer.t_ # flip flip_pt = [1.0, 1.0] if t_['flip_x']: flip_pt[0] = -1.0 if t_['flip_y']: flip_pt[1] = -1.0 if has_z: # no flip_z at the moment flip_pt.append(1.0) off_pts = np.multiply(off_pts, flip_pt) # swap if t_['swap_xy']: p = list(off_pts.T) off_pts = np.asarray([p[1], p[0]] + list(p[2:])).T # rotate if t_['rot_deg'] != 0: thetas = [t_['rot_deg']] offset = [0.0, 0.0] if has_z: offset.append(0.0) off_pts = trcalc.rotate_coord(off_pts, thetas, offset) return off_pts def from_(self, off_pts): """Reverse of :meth:`to_`.""" off_pts = np.asarray(off_pts, dtype=np.float) has_z = (off_pts.shape[-1] > 2) t_ = self.viewer.t_ # rotate if t_['rot_deg'] != 0: thetas = [- t_['rot_deg']] offset = [0.0, 0.0] if has_z: offset.append(0.0) off_pts = trcalc.rotate_coord(off_pts, thetas, offset) # swap if t_['swap_xy']: p = list(off_pts.T) off_pts = np.asarray([p[1], p[0]] + list(p[2:])).T # flip flip_pt = [1.0, 1.0] if t_['flip_x']: flip_pt[0] = -1.0 if t_['flip_y']: flip_pt[1] = -1.0 if has_z: # no flip_z at the moment flip_pt.append(1.0) off_pts = np.multiply(off_pts, flip_pt) return off_pts class ScaleTransform(BaseTransform): """ A transform in cartesian coordinates based on the scale of a viewer. """ def __init__(self, viewer): super(ScaleTransform, self).__init__() self.viewer = viewer def to_(self, off_pts): """Reverse of :meth:`from_`.""" off_pts = np.asarray(off_pts, dtype=np.float) has_z = (off_pts.shape[-1] > 2) # scale according to current settings scale_pt = [self.viewer._org_scale_x, self.viewer._org_scale_y] if has_z: scale_pt.append(self.viewer._org_scale_z) off_pts = np.multiply(off_pts, scale_pt) return off_pts def from_(self, off_pts): off_pts = np.asarray(off_pts, dtype=np.float) has_z = (off_pts.shape[-1] > 2) scale_pt = [1.0 / self.viewer._org_scale_x, 1.0 / self.viewer._org_scale_y] if has_z: scale_pt.append(1.0 / self.viewer._org_scale_z) # Reverse scaling off_pts = np.multiply(off_pts, scale_pt) return off_pts class DataCartesianTransform(BaseTransform): """ A transform from data coordinates to cartesian coordinates based on a viewer's pan position. """ def __init__(self, viewer, use_center=True): super(DataCartesianTransform, self).__init__() self.viewer = viewer # If use_center is True, then the coordinates are mapped such that the # pixel is centered on the square when the image is zoomed in past # 1X. This is the specification of the FITS image standard, # that the pixel is centered on the integer row/column. 
self.use_center = use_center def to_(self, data_pts): """Reverse of :meth:`from_`.""" data_pts = np.asarray(data_pts, dtype=np.float) has_z = (data_pts.shape[-1] > 2) if self.use_center: data_pts = data_pts - self.viewer.data_off # subtract data indexes at center reference pixel ref_pt = [self.viewer._org_x, self.viewer._org_y] if has_z: ref_pt.append(self.viewer._org_z) off_pts = np.subtract(data_pts, ref_pt) return off_pts def from_(self, off_pts): off_pts = np.asarray(off_pts, dtype=np.float) has_z = (off_pts.shape[-1] > 2) # Add data index at center to offset # subtract data indexes at center reference pixel ref_pt = [self.viewer._org_x, self.viewer._org_y] if has_z: ref_pt.append(self.viewer._org_z) data_pts = np.add(off_pts, ref_pt) if self.use_center: data_pts = data_pts + self.viewer.data_off return data_pts class OffsetDataTransform(BaseTransform): """ A transform whose coordinate space is offsets from a point in data space. """ def __init__(self, pt): super(OffsetDataTransform, self).__init__() self.pt = pt def to_(self, delta_pts): delta_x, delta_y = np.asarray(delta_pts, dtype=np.float).T ref_x, ref_y = self.pt[:2] res_x, res_y = ref_x + delta_x, ref_y + delta_y return np.asarray((res_x, res_y)).T def from_(self, data_pts): data_x, data_y = np.asarray(data_pts, dtype=np.float).T ref_x, ref_y = self.pt[:2] res_x, res_y = data_x - ref_x, data_y - ref_y return np.asarray((res_x, res_y)).T class WCSDataTransform(BaseTransform): """ A transform whose coordinate space is based on the WCS of the primary image loaded in a viewer. """ def __init__(self, viewer): super(WCSDataTransform, self).__init__() self.viewer = viewer def to_(self, wcs_pts): wcs_pts = np.asarray(wcs_pts) # hack to work around passing singleton pt vs. array of pts unpack = False if len(wcs_pts.shape) < 2: # passed a single coordinate wcs_pts = np.asarray([wcs_pts]) unpack = True image = self.viewer.get_image() if image is None: raise TransformError("No image, no WCS") wcs = image.wcs if wcs is None: raise TransformError("No valid WCS found in image") naxispath = image.naxispath res = wcs.wcspt_to_datapt(wcs_pts, naxispath=naxispath) if unpack: return res[0] return res def from_(self, data_pts): data_pts = np.asarray(data_pts) # hack to work around passing singleton pt vs. array of pts unpack = False if len(data_pts.shape) < 2: # passed a single coordinate data_pts = np.asarray([data_pts]) unpack = True image = self.viewer.get_image() if image is None: raise TransformError("No image, no WCS") wcs = image.wcs if wcs is None: raise TransformError("No valid WCS found in image") naxispath = image.naxispath res = wcs.datapt_to_wcspt(data_pts, naxispath=naxispath) if unpack: return res[0] return res def get_catalog(): """Returns a catalog of available transforms. These are used to build chains for rendering with different back ends. """ tforms = {} for name, value in list(globals().items()): if name.endswith('Transform'): tforms[name] = value return Bunch.Bunch(tforms, caseless=True) #END
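# A minimal composition sketch, assuming a Ginga-style viewer object that
# carries the attributes the transforms above read (get_center(), t_,
# data_off, _org_x, _org_y, and the _org_scale_* values).  The chain order
# shown (data -> cartesian -> scale -> rotation -> window) illustrates the
# to_()/from_() protocol; the exact chain a real viewer builds from
# get_catalog() may differ.

def data_to_window(viewer, data_pts):
    """Map points from data space to standard window coordinates."""
    pts = DataCartesianTransform(viewer).to_(data_pts)
    pts = ScaleTransform(viewer).to_(pts)
    pts = RotationTransform(viewer).to_(pts)
    return CartesianWindowTransform(viewer).to_(pts)


def window_to_data(viewer, win_pts):
    """Inverse mapping: apply the same chain in reverse using from_()."""
    pts = CartesianWindowTransform(viewer).from_(win_pts)
    pts = RotationTransform(viewer).from_(pts)
    pts = ScaleTransform(viewer).from_(pts)
    return DataCartesianTransform(viewer).from_(pts)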
table-row-def.directive.ts
import { CdkRowDef } from '@angular/cdk/table';
import { Directive, Input } from '@angular/core';

/**
 * Data row definition for the aui-table.
 * Captures the data row's template and other row properties such as the columns to display and
 * a when predicate that describes when this row should be used.
 */
@Directive({
  selector: '[auiTableRowDef]',
  providers: [{ provide: CdkRowDef, useExisting: TableRowDefDirective }],
columns: string[]; @Input('auiTableRowDefWhen') when: (index: number, rowData: T) => boolean; }
}) export class TableRowDefDirective<T> extends CdkRowDef<T> { @Input('auiTableRowDefColumns')
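// A minimal usage sketch, assuming an aui-table component modeled on the
// Angular CDK table; the row markup, column names, and predicate are
// illustrative only.  The structural form desugars onto the @Input aliases
// declared above ('auiTableRowDefColumns', 'auiTableRowDefWhen'):
//
//   <tr aui-table-row
//       *auiTableRowDef="let row; columns: ['name', 'size']; when: isFolder">
//   </tr>
//
// where `isFolder` has the shape (index: number, rowData: T) => boolean,
// matching the `when` input captured by this directive.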
sty.rs
//! This module contains `TyKind` and its major components. use hir; use hir::def_id::DefId; use infer::canonical::Canonical; use mir::interpret::ConstValue; use middle::region; use polonius_engine::Atom; use rustc_data_structures::indexed_vec::Idx; use ty::subst::{Substs, Subst, Kind, UnpackedKind}; use ty::{self, AdtDef, TypeFlags, Ty, TyCtxt, TypeFoldable}; use ty::{List, TyS, ParamEnvAnd, ParamEnv}; use util::captures::Captures; use mir::interpret::{Scalar, Pointer}; use smallvec::SmallVec; use std::iter; use std::cmp::Ordering; use rustc_target::spec::abi; use syntax::ast::{self, Ident}; use syntax::symbol::{keywords, InternedString}; use serialize; use self::InferTy::*; use self::TyKind::*; #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] pub struct TypeAndMut<'tcx> { pub ty: Ty<'tcx>, pub mutbl: hir::Mutability, } #[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash, RustcEncodable, RustcDecodable, Copy)] /// A "free" region `fr` can be interpreted as "some region /// at least as big as the scope `fr.scope`". pub struct FreeRegion { pub scope: DefId, pub bound_region: BoundRegion, } #[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash, RustcEncodable, RustcDecodable, Copy)] pub enum BoundRegion { /// An anonymous region parameter for a given fn (&T) BrAnon(u32), /// Named region parameters for functions (a in &'a T) /// /// The def-id is needed to distinguish free regions in /// the event of shadowing. BrNamed(DefId, InternedString), /// Fresh bound identifiers created during GLB computations. BrFresh(u32), /// Anonymous region for the implicit env pointer parameter /// to a closure BrEnv, } impl BoundRegion { pub fn is_named(&self) -> bool { match *self { BoundRegion::BrNamed(..) => true, _ => false, } } /// When canonicalizing, we replace unbound inference variables and free /// regions with anonymous late bound regions. This method asserts that /// we have an anonymous late bound region, which hence may refer to /// a canonical variable. pub fn assert_bound_var(&self) -> BoundVar { match *self { BoundRegion::BrAnon(var) => BoundVar::from_u32(var), _ => bug!("bound region is not anonymous"), } } } /// N.B., if you change this, you'll probably want to change the corresponding /// AST structure in `libsyntax/ast.rs` as well. #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] pub enum TyKind<'tcx> { /// The primitive boolean type. Written as `bool`. Bool, /// The primitive character type; holds a Unicode scalar value /// (a non-surrogate code point). Written as `char`. Char, /// A primitive signed integer type. For example, `i32`. Int(ast::IntTy), /// A primitive unsigned integer type. For example, `u32`. Uint(ast::UintTy), /// A primitive floating-point type. For example, `f64`. Float(ast::FloatTy), /// Structures, enumerations and unions. /// /// Substs here, possibly against intuition, *may* contain `Param`s. /// That is, even after substitution it is possible that there are type /// variables. This happens when the `Adt` corresponds to an ADT /// definition and not a concrete use of it. Adt(&'tcx AdtDef, &'tcx Substs<'tcx>), Foreign(DefId), /// The pointee of a string slice. Written as `str`. Str, /// An array with the given length. Written as `[T; n]`. Array(Ty<'tcx>, &'tcx ty::Const<'tcx>), /// The pointee of an array slice. Written as `[T]`. Slice(Ty<'tcx>), /// A raw pointer. 
Written as `*mut T` or `*const T` RawPtr(TypeAndMut<'tcx>), /// A reference; a pointer with an associated lifetime. Written as /// `&'a mut T` or `&'a T`. Ref(Region<'tcx>, Ty<'tcx>, hir::Mutability), /// The anonymous type of a function declaration/definition. Each /// function has a unique type, which is output (for a function /// named `foo` returning an `i32`) as `fn() -> i32 {foo}`. /// /// For example the type of `bar` here: /// /// ```rust /// fn foo() -> i32 { 1 } /// let bar = foo; // bar: fn() -> i32 {foo} /// ``` FnDef(DefId, &'tcx Substs<'tcx>), /// A pointer to a function. Written as `fn() -> i32`. /// /// For example the type of `bar` here: /// /// ```rust /// fn foo() -> i32 { 1 } /// let bar: fn() -> i32 = foo; /// ``` FnPtr(PolyFnSig<'tcx>), /// A trait, defined with `trait`. Dynamic(Binder<&'tcx List<ExistentialPredicate<'tcx>>>, ty::Region<'tcx>), /// The anonymous type of a closure. Used to represent the type of /// `|a| a`. Closure(DefId, ClosureSubsts<'tcx>), /// The anonymous type of a generator. Used to represent the type of /// `|a| yield a`. Generator(DefId, GeneratorSubsts<'tcx>, hir::GeneratorMovability), /// A type representin the types stored inside a generator. /// This should only appear in GeneratorInteriors. GeneratorWitness(Binder<&'tcx List<Ty<'tcx>>>), /// The never type `!` Never, /// A tuple type. For example, `(i32, bool)`. Tuple(&'tcx List<Ty<'tcx>>), /// The projection of an associated type. For example, /// `<T as Trait<..>>::N`. Projection(ProjectionTy<'tcx>), /// A placeholder type used when we do not have enough information /// to normalize the projection of an associated type to an /// existing concrete type. Currently only used with chalk-engine. UnnormalizedProjection(ProjectionTy<'tcx>), /// Opaque (`impl Trait`) type found in a return type. /// The `DefId` comes either from /// * the `impl Trait` ast::Ty node, /// * or the `existential type` declaration /// The substitutions are for the generics of the function in question. /// After typeck, the concrete type can be found in the `types` map. Opaque(DefId, &'tcx Substs<'tcx>), /// A type parameter; for example, `T` in `fn f<T>(x: T) {} Param(ParamTy), /// Bound type variable, used only when preparing a trait query. Bound(ty::DebruijnIndex, BoundTy), /// A placeholder type - universally quantified higher-ranked type. Placeholder(ty::PlaceholderType), /// A type variable used during type checking. Infer(InferTy), /// A placeholder for a type which could not be computed; this is /// propagated to avoid useless error messages. Error, } // `TyKind` is used a lot. Make sure it doesn't unintentionally get bigger. #[cfg(target_arch = "x86_64")] static_assert!(MEM_SIZE_OF_TY_KIND: ::std::mem::size_of::<TyKind<'_>>() == 24); /// A closure can be modeled as a struct that looks like: /// /// struct Closure<'l0...'li, T0...Tj, CK, CS, U0...Uk> { /// upvar0: U0, /// ... /// upvark: Uk /// } /// /// where: /// /// - 'l0...'li and T0...Tj are the lifetime and type parameters /// in scope on the function that defined the closure, /// - CK represents the *closure kind* (Fn vs FnMut vs FnOnce). This /// is rather hackily encoded via a scalar type. See /// `TyS::to_opt_closure_kind` for details. /// - CS represents the *closure signature*, representing as a `fn()` /// type. For example, `fn(u32, u32) -> u32` would mean that the closure /// implements `CK<(u32, u32), Output = u32>`, where `CK` is the trait /// specified above. 
/// - U0...Uk are type parameters representing the types of its upvars /// (borrowed, if appropriate; that is, if Ui represents a by-ref upvar, /// and the up-var has the type `Foo`, then `Ui = &Foo`). /// /// So, for example, given this function: /// /// fn foo<'a, T>(data: &'a mut T) { /// do(|| data.count += 1) /// } /// /// the type of the closure would be something like: /// /// struct Closure<'a, T, U0> { /// data: U0 /// } /// /// Note that the type of the upvar is not specified in the struct. /// You may wonder how the impl would then be able to use the upvar, /// if it doesn't know it's type? The answer is that the impl is /// (conceptually) not fully generic over Closure but rather tied to /// instances with the expected upvar types: /// /// impl<'b, 'a, T> FnMut() for Closure<'a, T, &'b mut &'a mut T> { /// ... /// } /// /// You can see that the *impl* fully specified the type of the upvar /// and thus knows full well that `data` has type `&'b mut &'a mut T`. /// (Here, I am assuming that `data` is mut-borrowed.) /// /// Now, the last question you may ask is: Why include the upvar types /// as extra type parameters? The reason for this design is that the /// upvar types can reference lifetimes that are internal to the /// creating function. In my example above, for example, the lifetime /// `'b` represents the scope of the closure itself; this is some /// subset of `foo`, probably just the scope of the call to the to /// `do()`. If we just had the lifetime/type parameters from the /// enclosing function, we couldn't name this lifetime `'b`. Note that /// there can also be lifetimes in the types of the upvars themselves, /// if one of them happens to be a reference to something that the /// creating fn owns. /// /// OK, you say, so why not create a more minimal set of parameters /// that just includes the extra lifetime parameters? The answer is /// primarily that it would be hard --- we don't know at the time when /// we create the closure type what the full types of the upvars are, /// nor do we know which are borrowed and which are not. In this /// design, we can just supply a fresh type parameter and figure that /// out later. /// /// All right, you say, but why include the type parameters from the /// original function then? The answer is that codegen may need them /// when monomorphizing, and they may not appear in the upvars. A /// closure could capture no variables but still make use of some /// in-scope type parameter with a bound (e.g., if our example above /// had an extra `U: Default`, and the closure called `U::default()`). /// /// There is another reason. This design (implicitly) prohibits /// closures from capturing themselves (except via a trait /// object). This simplifies closure inference considerably, since it /// means that when we infer the kind of a closure or its upvars, we /// don't have to handle cycles where the decisions we make for /// closure C wind up influencing the decisions we ought to make for /// closure C (which would then require fixed point iteration to /// handle). Plus it fixes an ICE. :P /// /// ## Generators /// /// Perhaps surprisingly, `ClosureSubsts` are also used for /// generators. In that case, what is written above is only half-true /// -- the set of type parameters is similar, but the role of CK and /// CS are different. CK represents the "yield type" and CS /// represents the "return type" of the generator. /// /// It'd be nice to split this struct into ClosureSubsts and /// GeneratorSubsts, I believe. 
-nmatsakis #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] pub struct ClosureSubsts<'tcx> { /// Lifetime and type parameters from the enclosing function, /// concatenated with the types of the upvars. /// /// These are separated out because codegen wants to pass them around /// when monomorphizing. pub substs: &'tcx Substs<'tcx>, } /// Struct returned by `split()`. Note that these are subslices of the /// parent slice and not canonical substs themselves. struct SplitClosureSubsts<'tcx> { closure_kind_ty: Ty<'tcx>, closure_sig_ty: Ty<'tcx>, upvar_kinds: &'tcx [Kind<'tcx>], } impl<'tcx> ClosureSubsts<'tcx> { /// Divides the closure substs into their respective /// components. Single source of truth with respect to the /// ordering. fn split(self, def_id: DefId, tcx: TyCtxt<'_, '_, '_>) -> SplitClosureSubsts<'tcx> { let generics = tcx.generics_of(def_id); let parent_len = generics.parent_count; SplitClosureSubsts { closure_kind_ty: self.substs.type_at(parent_len), closure_sig_ty: self.substs.type_at(parent_len + 1), upvar_kinds: &self.substs[parent_len + 2..], } } #[inline] pub fn upvar_tys(self, def_id: DefId, tcx: TyCtxt<'_, '_, '_>) -> impl Iterator<Item=Ty<'tcx>> + 'tcx { let SplitClosureSubsts { upvar_kinds, .. } = self.split(def_id, tcx); upvar_kinds.iter().map(|t| { if let UnpackedKind::Type(ty) = t.unpack() { ty } else { bug!("upvar should be type") } }) } /// Returns the closure kind for this closure; may return a type /// variable during inference. To get the closure kind during /// inference, use `infcx.closure_kind(def_id, substs)`. pub fn closure_kind_ty(self, def_id: DefId, tcx: TyCtxt<'_, '_, '_>) -> Ty<'tcx> { self.split(def_id, tcx).closure_kind_ty } /// Returns the type representing the closure signature for this /// closure; may contain type variables during inference. To get /// the closure signature during inference, use /// `infcx.fn_sig(def_id)`. pub fn closure_sig_ty(self, def_id: DefId, tcx: TyCtxt<'_, '_, '_>) -> Ty<'tcx> { self.split(def_id, tcx).closure_sig_ty } /// Returns the closure kind for this closure; only usable outside /// of an inference context, because in that context we know that /// there are no type variables. /// /// If you have an inference context, use `infcx.closure_kind()`. pub fn closure_kind(self, def_id: DefId, tcx: TyCtxt<'_, 'tcx, 'tcx>) -> ty::ClosureKind { self.split(def_id, tcx).closure_kind_ty.to_opt_closure_kind().unwrap() } /// Extracts the signature from the closure; only usable outside /// of an inference context, because in that context we know that /// there are no type variables. /// /// If you have an inference context, use `infcx.closure_sig()`. 
pub fn closure_sig(self, def_id: DefId, tcx: TyCtxt<'_, 'tcx, 'tcx>) -> ty::PolyFnSig<'tcx> { match self.closure_sig_ty(def_id, tcx).sty { ty::FnPtr(sig) => sig, ref t => bug!("closure_sig_ty is not a fn-ptr: {:?}", t), } } } #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] pub struct GeneratorSubsts<'tcx> { pub substs: &'tcx Substs<'tcx>, } struct SplitGeneratorSubsts<'tcx> { yield_ty: Ty<'tcx>, return_ty: Ty<'tcx>, witness: Ty<'tcx>, upvar_kinds: &'tcx [Kind<'tcx>], } impl<'tcx> GeneratorSubsts<'tcx> { fn split(self, def_id: DefId, tcx: TyCtxt<'_, '_, '_>) -> SplitGeneratorSubsts<'tcx> { let generics = tcx.generics_of(def_id); let parent_len = generics.parent_count; SplitGeneratorSubsts { yield_ty: self.substs.type_at(parent_len), return_ty: self.substs.type_at(parent_len + 1), witness: self.substs.type_at(parent_len + 2), upvar_kinds: &self.substs[parent_len + 3..], } } /// This describes the types that can be contained in a generator. /// It will be a type variable initially and unified in the last stages of typeck of a body. /// It contains a tuple of all the types that could end up on a generator frame. /// The state transformation MIR pass may only produce layouts which mention types /// in this tuple. Upvars are not counted here. pub fn witness(self, def_id: DefId, tcx: TyCtxt<'_, '_, '_>) -> Ty<'tcx> { self.split(def_id, tcx).witness } #[inline] pub fn upvar_tys(self, def_id: DefId, tcx: TyCtxt<'_, '_, '_>) -> impl Iterator<Item=Ty<'tcx>> + 'tcx { let SplitGeneratorSubsts { upvar_kinds, .. } = self.split(def_id, tcx); upvar_kinds.iter().map(|t| { if let UnpackedKind::Type(ty) = t.unpack() { ty } else { bug!("upvar should be type") } }) } /// Returns the type representing the yield type of the generator. pub fn yield_ty(self, def_id: DefId, tcx: TyCtxt<'_, '_, '_>) -> Ty<'tcx> { self.split(def_id, tcx).yield_ty } /// Returns the type representing the return type of the generator. pub fn return_ty(self, def_id: DefId, tcx: TyCtxt<'_, '_, '_>) -> Ty<'tcx> { self.split(def_id, tcx).return_ty } /// Return the "generator signature", which consists of its yield /// and return types. /// /// NB. Some bits of the code prefers to see this wrapped in a /// binder, but it never contains bound regions. Probably this /// function should be removed. pub fn poly_sig(self, def_id: DefId, tcx: TyCtxt<'_, '_, '_>) -> PolyGenSig<'tcx> { ty::Binder::dummy(self.sig(def_id, tcx)) } /// Return the "generator signature", which consists of its yield /// and return types. pub fn sig(self, def_id: DefId, tcx: TyCtxt<'_, '_, '_>) -> GenSig<'tcx> { ty::GenSig { yield_ty: self.yield_ty(def_id, tcx), return_ty: self.return_ty(def_id, tcx), } } } impl<'a, 'gcx, 'tcx> GeneratorSubsts<'tcx> { /// This returns the types of the MIR locals which had to be stored across suspension points. /// It is calculated in rustc_mir::transform::generator::StateTransform. /// All the types here must be in the tuple in GeneratorInterior. pub fn state_tys( self, def_id: DefId, tcx: TyCtxt<'a, 'gcx, 'tcx>, ) -> impl Iterator<Item=Ty<'tcx>> + Captures<'gcx> + 'a { let state = tcx.generator_layout(def_id).fields.iter(); state.map(move |d| d.ty.subst(tcx, self.substs)) } /// This is the types of the fields of a generate which /// is available before the generator transformation. /// It includes the upvars and the state discriminant which is u32. 
pub fn pre_transforms_tys(self, def_id: DefId, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> impl Iterator<Item=Ty<'tcx>> + 'a { self.upvar_tys(def_id, tcx).chain(iter::once(tcx.types.u32)) } /// This is the types of all the fields stored in a generator. /// It includes the upvars, state types and the state discriminant which is u32. pub fn field_tys(self, def_id: DefId, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> impl Iterator<Item=Ty<'tcx>> + Captures<'gcx> + 'a { self.pre_transforms_tys(def_id, tcx).chain(self.state_tys(def_id, tcx)) } } #[derive(Debug, Copy, Clone)] pub enum UpvarSubsts<'tcx> { Closure(ClosureSubsts<'tcx>), Generator(GeneratorSubsts<'tcx>), } impl<'tcx> UpvarSubsts<'tcx> { #[inline] pub fn upvar_tys(self, def_id: DefId, tcx: TyCtxt<'_, '_, '_>) -> impl Iterator<Item=Ty<'tcx>> + 'tcx { let upvar_kinds = match self { UpvarSubsts::Closure(substs) => substs.split(def_id, tcx).upvar_kinds, UpvarSubsts::Generator(substs) => substs.split(def_id, tcx).upvar_kinds, }; upvar_kinds.iter().map(|t| { if let UnpackedKind::Type(ty) = t.unpack() { ty } else { bug!("upvar should be type") } }) } } #[derive(Debug, Copy, Clone, PartialEq, PartialOrd, Ord, Eq, Hash, RustcEncodable, RustcDecodable)] pub enum ExistentialPredicate<'tcx> { /// e.g., Iterator Trait(ExistentialTraitRef<'tcx>), /// e.g., Iterator::Item = T Projection(ExistentialProjection<'tcx>), /// e.g., Send AutoTrait(DefId), } impl<'a, 'gcx, 'tcx> ExistentialPredicate<'tcx> { /// Compares via an ordering that will not change if modules are reordered or other changes are /// made to the tree. In particular, this ordering is preserved across incremental compilations. pub fn stable_cmp(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, other: &Self) -> Ordering { use self::ExistentialPredicate::*; match (*self, *other) { (Trait(_), Trait(_)) => Ordering::Equal, (Projection(ref a), Projection(ref b)) => tcx.def_path_hash(a.item_def_id).cmp(&tcx.def_path_hash(b.item_def_id)), (AutoTrait(ref a), AutoTrait(ref b)) => tcx.trait_def(*a).def_path_hash.cmp(&tcx.trait_def(*b).def_path_hash), (Trait(_), _) => Ordering::Less, (Projection(_), Trait(_)) => Ordering::Greater, (Projection(_), _) => Ordering::Less, (AutoTrait(_), _) => Ordering::Greater, } } } impl<'a, 'gcx, 'tcx> Binder<ExistentialPredicate<'tcx>> { pub fn with_self_ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, self_ty: Ty<'tcx>) -> ty::Predicate<'tcx> { use ty::ToPredicate; match *self.skip_binder() { ExistentialPredicate::Trait(tr) => Binder(tr).with_self_ty(tcx, self_ty).to_predicate(), ExistentialPredicate::Projection(p) => ty::Predicate::Projection(Binder(p.with_self_ty(tcx, self_ty))), ExistentialPredicate::AutoTrait(did) => { let trait_ref = Binder(ty::TraitRef { def_id: did, substs: tcx.mk_substs_trait(self_ty, &[]), }); trait_ref.to_predicate() } } } } impl<'tcx> serialize::UseSpecializedDecodable for &'tcx List<ExistentialPredicate<'tcx>> {} impl<'tcx> List<ExistentialPredicate<'tcx>> { pub fn principal(&self) -> ExistentialTraitRef<'tcx> { match self[0] { ExistentialPredicate::Trait(tr) => tr, other => bug!("first predicate is {:?}", other), } } #[inline] pub fn projection_bounds<'a>(&'a self) -> impl Iterator<Item=ExistentialProjection<'tcx>> + 'a { self.iter().filter_map(|predicate| { match *predicate { ExistentialPredicate::Projection(p) => Some(p), _ => None, } }) } #[inline] pub fn auto_traits<'a>(&'a self) -> impl Iterator<Item=DefId> + 'a { self.iter().filter_map(|predicate| { match *predicate { ExistentialPredicate::AutoTrait(d) => Some(d), _ => None } }) } } impl<'tcx> Binder<&'tcx 
List<ExistentialPredicate<'tcx>>> { pub fn principal(&self) -> PolyExistentialTraitRef<'tcx> { Binder::bind(self.skip_binder().principal()) } #[inline] pub fn projection_bounds<'a>(&'a self) -> impl Iterator<Item=PolyExistentialProjection<'tcx>> + 'a { self.skip_binder().projection_bounds().map(Binder::bind) } #[inline] pub fn auto_traits<'a>(&'a self) -> impl Iterator<Item=DefId> + 'a { self.skip_binder().auto_traits() } pub fn iter<'a>(&'a self) -> impl DoubleEndedIterator<Item=Binder<ExistentialPredicate<'tcx>>> + 'tcx { self.skip_binder().iter().cloned().map(Binder::bind) } } /// A complete reference to a trait. These take numerous guises in syntax, /// but perhaps the most recognizable form is in a where clause: /// /// T: Foo<U> /// /// This would be represented by a trait-reference where the def-id is the /// def-id for the trait `Foo` and the substs define `T` as parameter 0, /// and `U` as parameter 1. /// /// Trait references also appear in object types like `Foo<U>`, but in /// that case the `Self` parameter is absent from the substitutions. /// /// Note that a `TraitRef` introduces a level of region binding, to /// account for higher-ranked trait bounds like `T: for<'a> Foo<&'a U>` /// or higher-ranked object types. #[derive(Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] pub struct TraitRef<'tcx> { pub def_id: DefId, pub substs: &'tcx Substs<'tcx>, } impl<'tcx> TraitRef<'tcx> { pub fn new(def_id: DefId, substs: &'tcx Substs<'tcx>) -> TraitRef<'tcx> { TraitRef { def_id: def_id, substs: substs } } /// Returns a `TraitRef` of the form `P0: Foo<P1..Pn>` where `Pi` /// are the parameters defined on trait. pub fn identity<'a, 'gcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>, def_id: DefId) -> TraitRef<'tcx> { TraitRef { def_id, substs: Substs::identity_for_item(tcx, def_id), } } #[inline] pub fn self_ty(&self) -> Ty<'tcx> { self.substs.type_at(0) } pub fn input_types<'a>(&'a self) -> impl DoubleEndedIterator<Item = Ty<'tcx>> + 'a { // Select only the "input types" from a trait-reference. For // now this is all the types that appear in the // trait-reference, but it should eventually exclude // associated types. self.substs.types() } pub fn from_method(tcx: TyCtxt<'_, '_, 'tcx>, trait_id: DefId, substs: &Substs<'tcx>) -> ty::TraitRef<'tcx> { let defs = tcx.generics_of(trait_id); ty::TraitRef { def_id: trait_id, substs: tcx.intern_substs(&substs[..defs.params.len()]) } } } pub type PolyTraitRef<'tcx> = Binder<TraitRef<'tcx>>; impl<'tcx> PolyTraitRef<'tcx> { pub fn self_ty(&self) -> Ty<'tcx> { self.skip_binder().self_ty() } pub fn def_id(&self) -> DefId { self.skip_binder().def_id } pub fn to_poly_trait_predicate(&self) -> ty::PolyTraitPredicate<'tcx> { // Note that we preserve binding levels Binder(ty::TraitPredicate { trait_ref: self.skip_binder().clone() }) } } /// An existential reference to a trait, where `Self` is erased. /// For example, the trait object `Trait<'a, 'b, X, Y>` is: /// /// exists T. T: Trait<'a, 'b, X, Y> /// /// The substitutions don't include the erased `Self`, only trait /// type and lifetime parameters (`[X, Y]` and `['a, 'b]` above). #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] pub struct ExistentialTraitRef<'tcx> { pub def_id: DefId, pub substs: &'tcx Substs<'tcx>, } impl<'a, 'gcx, 'tcx> ExistentialTraitRef<'tcx> { pub fn input_types<'b>(&'b self) -> impl DoubleEndedIterator<Item=Ty<'tcx>> + 'b { // Select only the "input types" from a trait-reference. 
For // now this is all the types that appear in the // trait-reference, but it should eventually exclude // associated types. self.substs.types() } pub fn erase_self_ty(tcx: TyCtxt<'a, 'gcx, 'tcx>, trait_ref: ty::TraitRef<'tcx>) -> ty::ExistentialTraitRef<'tcx> { // Assert there is a Self. trait_ref.substs.type_at(0); ty::ExistentialTraitRef { def_id: trait_ref.def_id, substs: tcx.intern_substs(&trait_ref.substs[1..]) } } /// Object types don't have a self-type specified. Therefore, when /// we convert the principal trait-ref into a normal trait-ref, /// you must give *some* self-type. A common choice is `mk_err()` /// or some placeholder type. pub fn with_self_ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, self_ty: Ty<'tcx>) -> ty::TraitRef<'tcx> { // otherwise the escaping vars would be captured by the binder // debug_assert!(!self_ty.has_escaping_bound_vars()); ty::TraitRef { def_id: self.def_id, substs: tcx.mk_substs_trait(self_ty, self.substs) } } } pub type PolyExistentialTraitRef<'tcx> = Binder<ExistentialTraitRef<'tcx>>; impl<'tcx> PolyExistentialTraitRef<'tcx> { pub fn def_id(&self) -> DefId { self.skip_binder().def_id } /// Object types don't have a self-type specified. Therefore, when /// we convert the principal trait-ref into a normal trait-ref, /// you must give *some* self-type. A common choice is `mk_err()` /// or some placeholder type. pub fn with_self_ty(&self, tcx: TyCtxt<'_, '_, 'tcx>, self_ty: Ty<'tcx>) -> ty::PolyTraitRef<'tcx> { self.map_bound(|trait_ref| trait_ref.with_self_ty(tcx, self_ty)) } } /// Binder is a binder for higher-ranked lifetimes or types. It is part of the /// compiler's representation for things like `for<'a> Fn(&'a isize)` /// (which would be represented by the type `PolyTraitRef == /// Binder<TraitRef>`). Note that when we instantiate, /// erase, or otherwise "discharge" these bound vars, we change the /// type from `Binder<T>` to just `T` (see /// e.g., `liberate_late_bound_regions`). #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] pub struct Binder<T>(T); impl<T> Binder<T> { /// Wraps `value` in a binder, asserting that `value` does not /// contain any bound vars that would be bound by the /// binder. This is commonly used to 'inject' a value T into a /// different binding level. pub fn dummy<'tcx>(value: T) -> Binder<T> where T: TypeFoldable<'tcx> { debug_assert!(!value.has_escaping_bound_vars()); Binder(value) } /// Wraps `value` in a binder, binding higher-ranked vars (if any). pub fn bind<'tcx>(value: T) -> Binder<T> { Binder(value) } /// Skips the binder and returns the "bound" value. This is a /// risky thing to do because it's easy to get confused about /// debruijn indices and the like. It is usually better to /// discharge the binder using `no_bound_vars` or /// `replace_late_bound_regions` or something like /// that. `skip_binder` is only valid when you are either /// extracting data that has nothing to do with bound vars, you /// are doing some sort of test that does not involve bound /// regions, or you are being very careful about your depth /// accounting. 
/// /// Some examples where `skip_binder` is reasonable: /// /// - extracting the def-id from a PolyTraitRef; /// - comparing the self type of a PolyTraitRef to see if it is equal to /// a type parameter `X`, since the type `X` does not reference any regions pub fn skip_binder(&self) -> &T { &self.0 } pub fn as_ref(&self) -> Binder<&T> { Binder(&self.0) } pub fn map_bound_ref<F, U>(&self, f: F) -> Binder<U> where F: FnOnce(&T) -> U { self.as_ref().map_bound(f) } pub fn map_bound<F, U>(self, f: F) -> Binder<U> where F: FnOnce(T) -> U { Binder(f(self.0)) } /// Unwraps and returns the value within, but only if it contains /// no bound vars at all. (In other words, if this binder -- /// and indeed any enclosing binder -- doesn't bind anything at /// all.) Otherwise, returns `None`. /// /// (One could imagine having a method that just unwraps a single /// binder, but permits late-bound vars bound by enclosing /// binders, but that would require adjusting the debruijn /// indices, and given the shallow binding structure we often use, /// would not be that useful.) pub fn no_bound_vars<'tcx>(self) -> Option<T> where T: TypeFoldable<'tcx> { if self.skip_binder().has_escaping_bound_vars() { None } else { Some(self.skip_binder().clone()) } } /// Given two things that have the same binder level, /// and an operation that wraps on their contents, execute the operation /// and then wrap its result. /// /// `f` should consider bound regions at depth 1 to be free, and /// anything it produces with bound regions at depth 1 will be /// bound in the resulting return value. pub fn fuse<U,F,R>(self, u: Binder<U>, f: F) -> Binder<R> where F: FnOnce(T, U) -> R { Binder(f(self.0, u.0)) } /// Split the contents into two things that share the same binder /// level as the original, returning two distinct binders. /// /// `f` should consider bound regions at depth 1 to be free, and /// anything it produces with bound regions at depth 1 will be /// bound in the resulting return values. pub fn split<U,V,F>(self, f: F) -> (Binder<U>, Binder<V>) where F: FnOnce(T) -> (U, V) { let (u, v) = f(self.0); (Binder(u), Binder(v)) } } /// Represents the projection of an associated type. In explicit UFCS /// form this would be written `<T as Trait<..>>::N`. #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] pub struct ProjectionTy<'tcx> { /// The parameters of the associated item. pub substs: &'tcx Substs<'tcx>, /// The `DefId` of the `TraitItem` for the associated type `N`. /// /// Note that this is not the `DefId` of the `TraitRef` containing this /// associated type, which is in `tcx.associated_item(item_def_id).container`. pub item_def_id: DefId, } impl<'a, 'tcx> ProjectionTy<'tcx> { /// Construct a `ProjectionTy` by searching the trait from `trait_ref` for the /// associated item named `item_name`. pub fn from_ref_and_name( tcx: TyCtxt<'_, '_, '_>, trait_ref: ty::TraitRef<'tcx>, item_name: Ident ) -> ProjectionTy<'tcx> { let item_def_id = tcx.associated_items(trait_ref.def_id).find(|item| { item.kind == ty::AssociatedKind::Type && tcx.hygienic_eq(item_name, item.ident, trait_ref.def_id) }).unwrap().def_id; ProjectionTy { substs: trait_ref.substs, item_def_id, } } /// Extracts the underlying trait reference from this projection. /// For example, if this is a projection of `<T as Iterator>::Item`, /// then this function would return a `T: Iterator` trait reference. 
pub fn trait_ref(&self, tcx: TyCtxt<'_, '_, '_>) -> ty::TraitRef<'tcx> { let def_id = tcx.associated_item(self.item_def_id).container.id(); ty::TraitRef { def_id, substs: self.substs, } } pub fn self_ty(&self) -> Ty<'tcx> { self.substs.type_at(0) } } #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)] pub struct GenSig<'tcx> { pub yield_ty: Ty<'tcx>, pub return_ty: Ty<'tcx>, } pub type PolyGenSig<'tcx> = Binder<GenSig<'tcx>>; impl<'tcx> PolyGenSig<'tcx> { pub fn yield_ty(&self) -> ty::Binder<Ty<'tcx>> { self.map_bound_ref(|sig| sig.yield_ty) } pub fn return_ty(&self) -> ty::Binder<Ty<'tcx>> { self.map_bound_ref(|sig| sig.return_ty) } } /// Signature of a function type, which I have arbitrarily /// decided to use to refer to the input/output types. /// /// - `inputs` is the list of arguments and their modes. /// - `output` is the return type. /// - `variadic` indicates whether this is a variadic function. (only true for foreign fns) #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] pub struct FnSig<'tcx> { pub inputs_and_output: &'tcx List<Ty<'tcx>>, pub variadic: bool, pub unsafety: hir::Unsafety, pub abi: abi::Abi, } impl<'tcx> FnSig<'tcx> { pub fn inputs(&self) -> &'tcx [Ty<'tcx>] { &self.inputs_and_output[..self.inputs_and_output.len() - 1] } pub fn output(&self) -> Ty<'tcx> { self.inputs_and_output[self.inputs_and_output.len() - 1] } } pub type PolyFnSig<'tcx> = Binder<FnSig<'tcx>>; impl<'tcx> PolyFnSig<'tcx> { #[inline] pub fn inputs(&self) -> Binder<&'tcx [Ty<'tcx>]> { self.map_bound_ref(|fn_sig| fn_sig.inputs()) } #[inline] pub fn input(&self, index: usize) -> ty::Binder<Ty<'tcx>> { self.map_bound_ref(|fn_sig| fn_sig.inputs()[index]) } pub fn inputs_and_output(&self) -> ty::Binder<&'tcx List<Ty<'tcx>>> { self.map_bound_ref(|fn_sig| fn_sig.inputs_and_output) } #[inline] pub fn output(&self) -> ty::Binder<Ty<'tcx>> { self.map_bound_ref(|fn_sig| fn_sig.output()) } pub fn variadic(&self) -> bool { self.skip_binder().variadic } pub fn unsafety(&self) -> hir::Unsafety { self.skip_binder().unsafety } pub fn abi(&self) -> abi::Abi { self.skip_binder().abi } } pub type CanonicalPolyFnSig<'tcx> = Canonical<'tcx, Binder<FnSig<'tcx>>>; #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] pub struct ParamTy { pub idx: u32, pub name: InternedString, } impl<'a, 'gcx, 'tcx> ParamTy { pub fn new(index: u32, name: InternedString) -> ParamTy { ParamTy { idx: index, name: name } } pub fn for_self() -> ParamTy { ParamTy::new(0, keywords::SelfUpper.name().as_interned_str()) } pub fn for_def(def: &ty::GenericParamDef) -> ParamTy { ParamTy::new(def.index, def.name) } pub fn to_ty(self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx> { tcx.mk_ty_param(self.idx, self.name) } pub fn is_self(&self) -> bool { // FIXME(#50125): Ignoring `Self` with `idx != 0` might lead to weird behavior elsewhere, // but this should only be possible when using `-Z continue-parse-after-error` like // `compile-fail/issue-36638.rs`. self.name == keywords::SelfUpper.name().as_str() && self.idx == 0 } } /// A [De Bruijn index][dbi] is a standard means of representing /// regions (and perhaps later types) in a higher-ranked setting. 
In /// particular, imagine a type like this: /// /// for<'a> fn(for<'b> fn(&'b isize, &'a isize), &'a char) /// ^ ^ | | | /// | | | | | /// | +------------+ 0 | | /// | | | /// +--------------------------------+ 1 | /// | | /// +------------------------------------------+ 0 /// /// In this type, there are two binders (the outer fn and the inner /// fn). We need to be able to determine, for any given region, which /// fn type it is bound by, the inner or the outer one. There are /// various ways you can do this, but a De Bruijn index is one of the /// more convenient and has some nice properties. The basic idea is to /// count the number of binders, inside out. Some examples should help /// clarify what I mean. /// /// Let's start with the reference type `&'b isize` that is the first /// argument to the inner function. This region `'b` is assigned a De /// Bruijn index of 0, meaning "the innermost binder" (in this case, a /// fn). The region `'a` that appears in the second argument type (`&'a /// isize`) would then be assigned a De Bruijn index of 1, meaning "the /// second-innermost binder". (These indices are written on the arrays /// in the diagram). /// /// What is interesting is that De Bruijn index attached to a particular /// variable will vary depending on where it appears. For example, /// the final type `&'a char` also refers to the region `'a` declared on /// the outermost fn. But this time, this reference is not nested within /// any other binders (i.e., it is not an argument to the inner fn, but /// rather the outer one). Therefore, in this case, it is assigned a /// De Bruijn index of 0, because the innermost binder in that location /// is the outer fn. /// /// [dbi]: http://en.wikipedia.org/wiki/De_Bruijn_index newtype_index! { pub struct DebruijnIndex { DEBUG_FORMAT = "DebruijnIndex({})", const INNERMOST = 0, } } pub type Region<'tcx> = &'tcx RegionKind; /// Representation of regions. /// /// Unlike types, most region variants are "fictitious", not concrete, /// regions. Among these, `ReStatic`, `ReEmpty` and `ReScope` are the only /// ones representing concrete regions. /// /// ## Bound Regions /// /// These are regions that are stored behind a binder and must be substituted /// with some concrete region before being used. There are 2 kind of /// bound regions: early-bound, which are bound in an item's Generics, /// and are substituted by a Substs, and late-bound, which are part of /// higher-ranked types (e.g., `for<'a> fn(&'a ())`) and are substituted by /// the likes of `liberate_late_bound_regions`. The distinction exists /// because higher-ranked lifetimes aren't supported in all places. See [1][2]. /// /// Unlike Param-s, bound regions are not supposed to exist "in the wild" /// outside their binder, e.g., in types passed to type inference, and /// should first be substituted (by placeholder regions, free regions, /// or region variables). /// /// ## Placeholder and Free Regions /// /// One often wants to work with bound regions without knowing their precise /// identity. For example, when checking a function, the lifetime of a borrow /// can end up being assigned to some region parameter. In these cases, /// it must be ensured that bounds on the region can't be accidentally /// assumed without being checked. /// /// To do this, we replace the bound regions with placeholder markers, /// which don't satisfy any relation not explicitly provided. /// /// There are 2 kinds of placeholder regions in rustc: `ReFree` and /// `RePlaceholder`. 
When checking an item's body, `ReFree` is supposed /// to be used. These also support explicit bounds: both the internally-stored /// *scope*, which the region is assumed to outlive, as well as other /// relations stored in the `FreeRegionMap`. Note that these relations /// aren't checked when you `make_subregion` (or `eq_types`), only by /// `resolve_regions_and_report_errors`. /// /// When working with higher-ranked types, some region relations aren't /// yet known, so you can't just call `resolve_regions_and_report_errors`. /// `RePlaceholder` is designed for this purpose. In these contexts, /// there's also the risk that some inference variable laying around will /// get unified with your placeholder region: if you want to check whether /// `for<'a> Foo<'_>: 'a`, and you substitute your bound region `'a` /// with a placeholder region `'%a`, the variable `'_` would just be /// instantiated to the placeholder region `'%a`, which is wrong because /// the inference variable is supposed to satisfy the relation /// *for every value of the placeholder region*. To ensure that doesn't /// happen, you can use `leak_check`. This is more clearly explained /// by the [rustc guide]. /// /// [1]: http://smallcultfollowing.com/babysteps/blog/2013/10/29/intermingled-parameter-lists/ /// [2]: http://smallcultfollowing.com/babysteps/blog/2013/11/04/intermingled-parameter-lists/ /// [rustc guide]: https://rust-lang.github.io/rustc-guide/traits/hrtb.html #[derive(Clone, PartialEq, Eq, Hash, Copy, RustcEncodable, RustcDecodable, PartialOrd, Ord)] pub enum RegionKind { // Region bound in a type or fn declaration which will be // substituted 'early' -- that is, at the same time when type // parameters are substituted. ReEarlyBound(EarlyBoundRegion), // Region bound in a function scope, which will be substituted when the // function is called. ReLateBound(DebruijnIndex, BoundRegion), /// When checking a function body, the types of all arguments and so forth /// that refer to bound region parameters are modified to refer to free /// region parameters. ReFree(FreeRegion), /// A concrete region naming some statically determined scope /// (e.g., an expression or sequence of statements) within the /// current function. ReScope(region::Scope), /// Static data that has an "infinite" lifetime. Top in the region lattice. ReStatic, /// A region variable. Should not exist after typeck. ReVar(RegionVid), /// A placeholder region - basically the higher-ranked version of ReFree. /// Should not exist after typeck. RePlaceholder(ty::PlaceholderRegion), /// Empty lifetime is for data that is never accessed. /// Bottom in the region lattice. We treat ReEmpty somewhat /// specially; at least right now, we do not generate instances of /// it during the GLB computations, but rather /// generate an error instead. This is to improve error messages. /// The only way to get an instance of ReEmpty is to have a region /// variable with no constraints. ReEmpty, /// Erased region, used by trait selection, in MIR and during codegen. ReErased, /// These are regions bound in the "defining type" for a /// closure. They are used ONLY as part of the /// `ClosureRegionRequirements` that are produced by MIR borrowck. /// See `ClosureRegionRequirements` for more details. 
ReClosureBound(RegionVid), } impl<'tcx> serialize::UseSpecializedDecodable for Region<'tcx> {} #[derive(Copy, Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable, Debug, PartialOrd, Ord)] pub struct EarlyBoundRegion { pub def_id: DefId, pub index: u32, pub name: InternedString, } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] pub struct TyVid { pub index: u32, } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] pub struct IntVid { pub index: u32, } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] pub struct FloatVid { pub index: u32, } newtype_index! { pub struct RegionVid { DEBUG_FORMAT = custom, } } impl Atom for RegionVid { fn index(self) -> usize { Idx::index(self) } } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] pub enum InferTy { TyVar(TyVid), IntVar(IntVid), FloatVar(FloatVid), /// A `FreshTy` is one that is generated as a replacement for an /// unbound type variable. This is convenient for caching etc. See /// `infer::freshen` for more details. FreshTy(u32), FreshIntTy(u32), FreshFloatTy(u32), } newtype_index! { pub struct BoundVar { .. } } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] pub struct BoundTy { pub var: BoundVar, pub kind: BoundTyKind, } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] pub enum BoundTyKind { Anon, Param(InternedString), } impl_stable_hash_for!(struct BoundTy { var, kind }); impl_stable_hash_for!(enum self::BoundTyKind { Anon, Param(a) }); impl From<BoundVar> for BoundTy { fn from(var: BoundVar) -> Self { BoundTy { var, kind: BoundTyKind::Anon, } } } /// A `ProjectionPredicate` for an `ExistentialTraitRef`. #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)] pub struct ExistentialProjection<'tcx> { pub item_def_id: DefId, pub substs: &'tcx Substs<'tcx>, pub ty: Ty<'tcx>, } pub type PolyExistentialProjection<'tcx> = Binder<ExistentialProjection<'tcx>>; impl<'a, 'tcx, 'gcx> ExistentialProjection<'tcx> { /// Extracts the underlying existential trait reference from this projection. /// For example, if this is a projection of `exists T. <T as Iterator>::Item == X`, /// then this function would return a `exists T. T: Iterator` existential trait /// reference. pub fn trait_ref(&self, tcx: TyCtxt<'_, '_, '_>) -> ty::ExistentialTraitRef<'tcx> { let def_id = tcx.associated_item(self.item_def_id).container.id(); ty::ExistentialTraitRef{ def_id, substs: self.substs, } } pub fn with_self_ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, self_ty: Ty<'tcx>) -> ty::ProjectionPredicate<'tcx> { // otherwise the escaping regions would be captured by the binders debug_assert!(!self_ty.has_escaping_bound_vars()); ty::ProjectionPredicate { projection_ty: ty::ProjectionTy { item_def_id: self.item_def_id, substs: tcx.mk_substs_trait(self_ty, self.substs), }, ty: self.ty, } } } impl<'a, 'tcx, 'gcx> PolyExistentialProjection<'tcx> { pub fn with_self_ty(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>, self_ty: Ty<'tcx>) -> ty::PolyProjectionPredicate<'tcx> { self.map_bound(|p| p.with_self_ty(tcx, self_ty)) } pub fn item_def_id(&self) -> DefId { return self.skip_binder().item_def_id; } } impl DebruijnIndex { /// Returns the resulting index when this value is moved into /// `amount` number of new binders. 
So e.g., if you had /// /// for<'a> fn(&'a x) /// /// and you wanted to change to /// /// for<'a> fn(for<'b> fn(&'a x)) /// /// you would need to shift the index for `'a` into a new binder. #[must_use] pub fn shifted_in(self, amount: u32) -> DebruijnIndex { DebruijnIndex::from_u32(self.as_u32() + amount) } /// Update this index in place by shifting it "in" through /// `amount` number of binders. pub fn shift_in(&mut self, amount: u32) { *self = self.shifted_in(amount); } /// Returns the resulting index when this value is moved out from /// `amount` number of new binders. #[must_use] pub fn shifted_out(self, amount: u32) -> DebruijnIndex { DebruijnIndex::from_u32(self.as_u32() - amount) } /// Update in place by shifting out from `amount` binders. pub fn shift_out(&mut self, amount: u32) { *self = self.shifted_out(amount); } /// Adjusts any Debruijn Indices so as to make `to_binder` the /// innermost binder. That is, if we have something bound at `to_binder`, /// it will now be bound at INNERMOST. This is an appropriate thing to do /// when moving a region out from inside binders: /// /// ``` /// for<'a> fn(for<'b> for<'c> fn(&'a u32), _) /// // Binder: D3 D2 D1 ^^ /// ``` /// /// Here, the region `'a` would have the debruijn index D3, /// because it is the bound 3 binders out. However, if we wanted /// to refer to that region `'a` in the second argument (the `_`), /// those two binders would not be in scope. In that case, we /// might invoke `shift_out_to_binder(D3)`. This would adjust the /// debruijn index of `'a` to D1 (the innermost binder). /// /// If we invoke `shift_out_to_binder` and the region is in fact /// bound by one of the binders we are shifting out of, that is an /// error (and should fail an assertion failure). pub fn shifted_out_to_binder(self, to_binder: DebruijnIndex) -> Self { self.shifted_out(to_binder.as_u32() - INNERMOST.as_u32()) } } impl_stable_hash_for!(struct DebruijnIndex { private }); /// Region utilities impl RegionKind { /// Is this region named by the user? pub fn has_name(&self) -> bool { match *self { RegionKind::ReEarlyBound(ebr) => ebr.has_name(), RegionKind::ReLateBound(_, br) => br.is_named(), RegionKind::ReFree(fr) => fr.bound_region.is_named(), RegionKind::ReScope(..) => false, RegionKind::ReStatic => true, RegionKind::ReVar(..) => false, RegionKind::RePlaceholder(placeholder) => placeholder.name.is_named(), RegionKind::ReEmpty => false, RegionKind::ReErased => false, RegionKind::ReClosureBound(..) => false, } } pub fn is_late_bound(&self) -> bool { match *self { ty::ReLateBound(..) => true, _ => false, } } pub fn bound_at_or_above_binder(&self, index: DebruijnIndex) -> bool { match *self { ty::ReLateBound(debruijn, _) => debruijn >= index, _ => false, } } /// Adjusts any Debruijn Indices so as to make `to_binder` the /// innermost binder. That is, if we have something bound at `to_binder`, /// it will now be bound at INNERMOST. This is an appropriate thing to do /// when moving a region out from inside binders: /// /// ``` /// for<'a> fn(for<'b> for<'c> fn(&'a u32), _) /// // Binder: D3 D2 D1 ^^ /// ``` /// /// Here, the region `'a` would have the debruijn index D3, /// because it is the bound 3 binders out. However, if we wanted /// to refer to that region `'a` in the second argument (the `_`), /// those two binders would not be in scope. In that case, we /// might invoke `shift_out_to_binder(D3)`. This would adjust the /// debruijn index of `'a` to D1 (the innermost binder). 
/// /// If we invoke `shift_out_to_binder` and the region is in fact /// bound by one of the binders we are shifting out of, that is an /// error (and should fail an assertion failure). pub fn shifted_out_to_binder(&self, to_binder: ty::DebruijnIndex) -> RegionKind { match *self { ty::ReLateBound(debruijn, r) => ty::ReLateBound( debruijn.shifted_out_to_binder(to_binder), r, ), r => r } } pub fn keep_in_local_tcx(&self) -> bool { if let ty::ReVar(..) = self { true } else { false } } pub fn type_flags(&self) -> TypeFlags { let mut flags = TypeFlags::empty(); if self.keep_in_local_tcx() { flags = flags | TypeFlags::KEEP_IN_LOCAL_TCX; } match *self { ty::ReVar(..) => { flags = flags | TypeFlags::HAS_FREE_REGIONS; flags = flags | TypeFlags::HAS_RE_INFER; } ty::RePlaceholder(..) => { flags = flags | TypeFlags::HAS_FREE_REGIONS; flags = flags | TypeFlags::HAS_RE_PLACEHOLDER; } ty::ReLateBound(..) => { flags = flags | TypeFlags::HAS_RE_LATE_BOUND; } ty::ReEarlyBound(..) => { flags = flags | TypeFlags::HAS_FREE_REGIONS; flags = flags | TypeFlags::HAS_RE_EARLY_BOUND; } ty::ReEmpty | ty::ReStatic | ty::ReFree { .. } | ty::ReScope { .. } => { flags = flags | TypeFlags::HAS_FREE_REGIONS; } ty::ReErased => { } ty::ReClosureBound(..) => { flags = flags | TypeFlags::HAS_FREE_REGIONS; } } match *self { ty::ReStatic | ty::ReEmpty | ty::ReErased | ty::ReLateBound(..) => (), _ => flags = flags | TypeFlags::HAS_FREE_LOCAL_NAMES, } debug!("type_flags({:?}) = {:?}", self, flags); flags } /// Given an early-bound or free region, returns the def-id where it was bound. /// For example, consider the regions in this snippet of code: /// /// ``` /// impl<'a> Foo { /// ^^ -- early bound, declared on an impl /// /// fn bar<'b, 'c>(x: &self, y: &'b u32, z: &'c u64) where 'static: 'c /// ^^ ^^ ^ anonymous, late-bound /// | early-bound, appears in where-clauses /// late-bound, appears only in fn args /// {..} /// } /// ``` /// /// Here, `free_region_binding_scope('a)` would return the def-id /// of the impl, and for all the other highlighted regions, it /// would return the def-id of the function. In other cases (not shown), this /// function might return the def-id of a closure. pub fn free_region_binding_scope(&self, tcx: TyCtxt<'_, '_, '_>) -> DefId { match self { ty::ReEarlyBound(br) => { tcx.parent_def_id(br.def_id).unwrap() } ty::ReFree(fr) => fr.scope, _ => bug!("free_region_binding_scope invoked on inappropriate region: {:?}", self), } } } /// Type utilities impl<'a, 'gcx, 'tcx> TyS<'tcx> { pub fn is_unit(&self) -> bool { match self.sty { Tuple(ref tys) => tys.is_empty(), _ => false, } } pub fn is_never(&self) -> bool { match self.sty { Never => true, _ => false, } } /// Checks whether a type is definitely uninhabited. This is /// conservative: for some types that are uninhabited we return `false`, /// but we only return `true` for types that are definitely uninhabited. /// `ty.conservative_is_privately_uninhabited` implies that any value of type `ty` /// will be `Abi::Uninhabited`. (Note that uninhabited types may have nonzero /// size, to account for partial initialisation. See #49298 for details.) pub fn conservative_is_privately_uninhabited(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> bool { // FIXME(varkor): we can make this less conversative by substituting concrete // type arguments. match self.sty { ty::Never => true, ty::Adt(def, _) if def.is_union() => { // For now, `union`s are never considered uninhabited. false } ty::Adt(def, _) => { // Any ADT is uninhabited if either: // (a) It has no variants (i.e. 
an empty `enum`); // (b) Each of its variants (a single one in the case of a `struct`) has at least // one uninhabited field. def.variants.iter().all(|var| { var.fields.iter().any(|field| { tcx.type_of(field.did).conservative_is_privately_uninhabited(tcx) }) }) } ty::Tuple(tys) => tys.iter().any(|ty| ty.conservative_is_privately_uninhabited(tcx)), ty::Array(ty, len) => { match len.assert_usize(tcx) { // If the array is definitely non-empty, it's uninhabited if // the type of its elements is uninhabited. Some(n) if n != 0 => ty.conservative_is_privately_uninhabited(tcx), _ => false } } ty::Ref(..) => { // References to uninitialised memory is valid for any type, including // uninhabited types, in unsafe code, so we treat all references as // inhabited. false } _ => false, } } pub fn is_primitive(&self) -> bool { match self.sty { Bool | Char | Int(_) | Uint(_) | Float(_) => true, _ => false, } } #[inline] pub fn is_ty_var(&self) -> bool { match self.sty { Infer(TyVar(_)) => true, _ => false, } } pub fn is_ty_infer(&self) -> bool { match self.sty { Infer(_) => true, _ => false, } } pub fn is_phantom_data(&self) -> bool { if let Adt(def, _) = self.sty { def.is_phantom_data() } else { false } } pub fn is_bool(&self) -> bool { self.sty == Bool } pub fn is_param(&self, index: u32) -> bool { match self.sty { ty::Param(ref data) => data.idx == index, _ => false, } } pub fn is_self(&self) -> bool { match self.sty { Param(ref p) => p.is_self(), _ => false, } } pub fn is_slice(&self) -> bool { match self.sty { RawPtr(TypeAndMut { ty, .. }) | Ref(_, ty, _) => match ty.sty { Slice(_) | Str => true, _ => false, }, _ => false } } #[inline] pub fn is_simd(&self) -> bool { match self.sty { Adt(def, _) => def.repr.simd(), _ => false, } } pub fn sequence_element_type(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx> { match self.sty { Array(ty, _) | Slice(ty) => ty, Str => tcx.mk_mach_uint(ast::UintTy::U8), _ => bug!("sequence_element_type called on non-sequence value: {}", self), } } pub fn simd_type(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> Ty<'tcx> { match self.sty { Adt(def, substs) => { def.non_enum_variant().fields[0].ty(tcx, substs) } _ => bug!("simd_type called on invalid type") } } pub fn simd_size(&self, _cx: TyCtxt<'_, '_, '_>) -> usize { match self.sty { Adt(def, _) => def.non_enum_variant().fields.len(), _ => bug!("simd_size called on invalid type") } } pub fn is_region_ptr(&self) -> bool { match self.sty { Ref(..) => true, _ => false, } } pub fn is_mutable_pointer(&self) -> bool { match self.sty { RawPtr(TypeAndMut { mutbl: hir::Mutability::MutMutable, .. }) | Ref(_, _, hir::Mutability::MutMutable) => true, _ => false } } pub fn is_unsafe_ptr(&self) -> bool { match self.sty { RawPtr(_) => return true, _ => return false, } } /// Returns `true` if this type is an `Arc<T>`. pub fn is_arc(&self) -> bool { match self.sty { Adt(def, _) => def.is_arc(), _ => false, } } /// Returns `true` if this type is an `Rc<T>`. pub fn is_rc(&self) -> bool { match self.sty { Adt(def, _) => def.is_rc(), _ => false, } } pub fn is_box(&self) -> bool { match self.sty { Adt(def, _) => def.is_box(), _ => false, } } /// panics if called on any type other than `Box<T>` pub fn boxed_ty(&self) -> Ty<'tcx> { match self.sty { Adt(def, substs) if def.is_box() => substs.type_at(0), _ => bug!("`boxed_ty` is called on non-box type {:?}", self), } } /// A scalar type is one that denotes an atomic datum, with no sub-components. /// (A RawPtr is scalar because it represents a non-managed pointer, so its /// contents are abstract to rustc.) 
pub fn is_scalar(&self) -> bool { match self.sty { Bool | Char | Int(_) | Float(_) | Uint(_) | Infer(IntVar(_)) | Infer(FloatVar(_)) | FnDef(..) | FnPtr(_) | RawPtr(_) => true, _ => false } } /// Returns true if this type is a floating point type and false otherwise. pub fn is_floating_point(&self) -> bool { match self.sty { Float(_) | Infer(FloatVar(_)) => true, _ => false, } } pub fn is_trait(&self) -> bool { match self.sty { Dynamic(..) => true, _ => false, } } pub fn is_enum(&self) -> bool { match self.sty { Adt(adt_def, _) => { adt_def.is_enum() } _ => false, } } pub fn is_closure(&self) -> bool { match self.sty { Closure(..) => true, _ => false, } } pub fn is_generator(&self) -> bool { match self.sty { Generator(..) => true, _ => false, } } #[inline] pub fn is_integral(&self) -> bool { match self.sty { Infer(IntVar(_)) | Int(_) | Uint(_) => true, _ => false } } pub fn is_fresh_ty(&self) -> bool { match self.sty { Infer(FreshTy(_)) => true, _ => false, } } pub fn is_fresh(&self) -> bool { match self.sty { Infer(FreshTy(_)) => true, Infer(FreshIntTy(_)) => true, Infer(FreshFloatTy(_)) => true, _ => false, } } pub fn is_char(&self) -> bool { match self.sty { Char => true, _ => false, } } #[inline] pub fn is_fp(&self) -> bool { match self.sty { Infer(FloatVar(_)) | Float(_) => true, _ => false } } pub fn is_numeric(&self) -> bool { self.is_integral() || self.is_fp() } pub fn is_signed(&self) -> bool { match self.sty { Int(_) => true, _ => false, } } pub fn is_pointer_sized(&self) -> bool { match self.sty { Int(ast::IntTy::Isize) | Uint(ast::UintTy::Usize) => true, _ => false, } } pub fn is_machine(&self) -> bool { match self.sty { Int(ast::IntTy::Isize) | Uint(ast::UintTy::Usize) => false, Int(..) | Uint(..) | Float(..) => true, _ => false, } } pub fn has_concrete_skeleton(&self) -> bool { match self.sty { Param(_) | Infer(_) | Error => false, _ => true, } } /// Returns the type and mutability of `*ty`. /// /// The parameter `explicit` indicates if this is an *explicit* dereference. /// Some types -- notably unsafe ptrs -- can only be dereferenced explicitly. pub fn builtin_deref(&self, explicit: bool) -> Option<TypeAndMut<'tcx>> { match self.sty { Adt(def, _) if def.is_box() => { Some(TypeAndMut { ty: self.boxed_ty(), mutbl: hir::MutImmutable, }) }, Ref(_, ty, mutbl) => Some(TypeAndMut { ty, mutbl }), RawPtr(mt) if explicit => Some(mt), _ => None, } } /// Returns the type of `ty[i]`. pub fn builtin_index(&self) -> Option<Ty<'tcx>>
pub fn fn_sig(&self, tcx: TyCtxt<'a, 'gcx, 'tcx>) -> PolyFnSig<'tcx> { match self.sty { FnDef(def_id, substs) => { tcx.fn_sig(def_id).subst(tcx, substs) } FnPtr(f) => f, _ => bug!("Ty::fn_sig() called on non-fn type: {:?}", self) } } pub fn is_fn(&self) -> bool { match self.sty { FnDef(..) | FnPtr(_) => true, _ => false, } } pub fn is_impl_trait(&self) -> bool { match self.sty { Opaque(..) => true, _ => false, } } #[inline] pub fn ty_adt_def(&self) -> Option<&'tcx AdtDef> { match self.sty { Adt(adt, _) => Some(adt), _ => None, } } /// Push onto `out` the regions directly referenced from this type (but not /// types reachable from this type via `walk_tys`). This ignores late-bound /// regions binders. pub fn push_regions(&self, out: &mut SmallVec<[ty::Region<'tcx>; 4]>) { match self.sty { Ref(region, _, _) => { out.push(region); } Dynamic(ref obj, region) => { out.push(region); out.extend(obj.principal().skip_binder().substs.regions()); } Adt(_, substs) | Opaque(_, substs) => { out.extend(substs.regions()) } Closure(_, ClosureSubsts { ref substs }) | Generator(_, GeneratorSubsts { ref substs }, _) => { out.extend(substs.regions()) } Projection(ref data) | UnnormalizedProjection(ref data) => { out.extend(data.substs.regions()) } FnDef(..) | FnPtr(_) | GeneratorWitness(..) | Bool | Char | Int(_) | Uint(_) | Float(_) | Str | Array(..) | Slice(_) | RawPtr(_) | Never | Tuple(..) | Foreign(..) | Param(_) | Bound(..) | Placeholder(..) | Infer(_) | Error => {} } } /// When we create a closure, we record its kind (i.e., what trait /// it implements) into its `ClosureSubsts` using a type /// parameter. This is kind of a phantom type, except that the /// most convenient thing for us to are the integral types. This /// function converts such a special type into the closure /// kind. To go the other way, use /// `tcx.closure_kind_ty(closure_kind)`. /// /// Note that during type checking, we use an inference variable /// to represent the closure kind, because it has not yet been /// inferred. Once upvar inference (in `src/librustc_typeck/check/upvar.rs`) /// is complete, that type variable will be unified. pub fn to_opt_closure_kind(&self) -> Option<ty::ClosureKind> { match self.sty { Int(int_ty) => match int_ty { ast::IntTy::I8 => Some(ty::ClosureKind::Fn), ast::IntTy::I16 => Some(ty::ClosureKind::FnMut), ast::IntTy::I32 => Some(ty::ClosureKind::FnOnce), _ => bug!("cannot convert type `{:?}` to a closure kind", self), }, Infer(_) => None, Error => Some(ty::ClosureKind::Fn), _ => bug!("cannot convert type `{:?}` to a closure kind", self), } } /// Fast path helper for testing if a type is `Sized`. /// /// Returning true means the type is known to be sized. Returning /// `false` means nothing -- could be sized, might not be. pub fn is_trivially_sized(&self, tcx: TyCtxt<'_, '_, 'tcx>) -> bool { match self.sty { ty::Infer(ty::IntVar(_)) | ty::Infer(ty::FloatVar(_)) | ty::Uint(_) | ty::Int(_) | ty::Bool | ty::Float(_) | ty::FnDef(..) | ty::FnPtr(_) | ty::RawPtr(..) | ty::Char | ty::Ref(..) | ty::Generator(..) | ty::GeneratorWitness(..) | ty::Array(..) | ty::Closure(..) | ty::Never | ty::Error => true, ty::Str | ty::Slice(_) | ty::Dynamic(..) | ty::Foreign(..) => false, ty::Tuple(tys) => tys.iter().all(|ty| ty.is_trivially_sized(tcx)), ty::Adt(def, _substs) => def.sized_constraint(tcx).is_empty(), ty::Projection(_) | ty::Param(_) | ty::Opaque(..) => false, ty::UnnormalizedProjection(..) => bug!("only used with chalk-engine"), ty::Infer(ty::TyVar(_)) => false, ty::Bound(..) | ty::Placeholder(..) 
| ty::Infer(ty::FreshTy(_)) | ty::Infer(ty::FreshIntTy(_)) | ty::Infer(ty::FreshFloatTy(_)) => bug!("is_trivially_sized applied to unexpected type: {:?}", self), } } } /// Typed constant value. #[derive(Copy, Clone, Debug, Hash, RustcEncodable, RustcDecodable, Eq, PartialEq, Ord, PartialOrd)] pub struct Const<'tcx> { pub ty: Ty<'tcx>, pub val: ConstValue<'tcx>, } impl<'tcx> Const<'tcx> { pub fn unevaluated( tcx: TyCtxt<'_, '_, 'tcx>, def_id: DefId, substs: &'tcx Substs<'tcx>, ty: Ty<'tcx>, ) -> &'tcx Self { tcx.mk_const(Const { val: ConstValue::Unevaluated(def_id, substs), ty, }) } #[inline] pub fn from_const_value( tcx: TyCtxt<'_, '_, 'tcx>, val: ConstValue<'tcx>, ty: Ty<'tcx>, ) -> &'tcx Self { tcx.mk_const(Const { val, ty, }) } #[inline] pub fn from_scalar( tcx: TyCtxt<'_, '_, 'tcx>, val: Scalar, ty: Ty<'tcx>, ) -> &'tcx Self { Self::from_const_value(tcx, ConstValue::Scalar(val), ty) } #[inline] pub fn from_bits( tcx: TyCtxt<'_, '_, 'tcx>, bits: u128, ty: ParamEnvAnd<'tcx, Ty<'tcx>>, ) -> &'tcx Self { let ty = tcx.lift_to_global(&ty).unwrap(); let size = tcx.layout_of(ty).unwrap_or_else(|e| { panic!("could not compute layout for {:?}: {:?}", ty, e) }).size; let shift = 128 - size.bits(); let truncated = (bits << shift) >> shift; assert_eq!(truncated, bits, "from_bits called with untruncated value"); Self::from_scalar(tcx, Scalar::Bits { bits, size: size.bytes() as u8 }, ty.value) } #[inline] pub fn zero_sized(tcx: TyCtxt<'_, '_, 'tcx>, ty: Ty<'tcx>) -> &'tcx Self { Self::from_scalar(tcx, Scalar::Bits { bits: 0, size: 0 }, ty) } #[inline] pub fn from_bool(tcx: TyCtxt<'_, '_, 'tcx>, v: bool) -> &'tcx Self { Self::from_bits(tcx, v as u128, ParamEnv::empty().and(tcx.types.bool)) } #[inline] pub fn from_usize(tcx: TyCtxt<'_, '_, 'tcx>, n: u64) -> &'tcx Self { Self::from_bits(tcx, n as u128, ParamEnv::empty().and(tcx.types.usize)) } #[inline] pub fn to_bits( &self, tcx: TyCtxt<'_, '_, 'tcx>, ty: ParamEnvAnd<'tcx, Ty<'tcx>>, ) -> Option<u128> { if self.ty != ty.value { return None; } let ty = tcx.lift_to_global(&ty).unwrap(); let size = tcx.layout_of(ty).ok()?.size; self.val.try_to_bits(size) } #[inline] pub fn to_ptr(&self) -> Option<Pointer> { self.val.try_to_ptr() } #[inline] pub fn assert_bits( &self, tcx: TyCtxt<'_, '_, '_>, ty: ParamEnvAnd<'tcx, Ty<'tcx>>, ) -> Option<u128> { assert_eq!(self.ty, ty.value); let ty = tcx.lift_to_global(&ty).unwrap(); let size = tcx.layout_of(ty).ok()?.size; self.val.try_to_bits(size) } #[inline] pub fn assert_bool(&self, tcx: TyCtxt<'_, '_, '_>) -> Option<bool> { self.assert_bits(tcx, ParamEnv::empty().and(tcx.types.bool)).and_then(|v| match v { 0 => Some(false), 1 => Some(true), _ => None, }) } #[inline] pub fn assert_usize(&self, tcx: TyCtxt<'_, '_, '_>) -> Option<u64> { self.assert_bits(tcx, ParamEnv::empty().and(tcx.types.usize)).map(|v| v as u64) } #[inline] pub fn unwrap_bits( &self, tcx: TyCtxt<'_, '_, '_>, ty: ParamEnvAnd<'tcx, Ty<'tcx>>, ) -> u128 { self.assert_bits(tcx, ty).unwrap_or_else(|| bug!("expected bits of {}, got {:#?}", ty.value, self)) } #[inline] pub fn unwrap_usize(&self, tcx: TyCtxt<'_, '_, '_>) -> u64 { self.assert_usize(tcx).unwrap_or_else(|| bug!("expected constant usize, got {:#?}", self)) } } impl<'tcx> serialize::UseSpecializedDecodable for &'tcx Const<'tcx> {}
{ match self.sty { Array(ty, _) | Slice(ty) => Some(ty), _ => None, } }
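The truncation assertion in `Const::from_bits` above works by a shift round-trip: pushing the value to the top of the 128-bit word and back again drops any bits beyond the target size, so an input that was not already truncated no longer compares equal. A minimal Python sketch of that check — the 128-bit mask is made explicit because Python integers are unbounded, and the sample values are illustrative, not from the source:

```python
MASK_128 = (1 << 128) - 1  # emulate u128 wrapping for the left shift

def is_truncated(bits: int, size_bits: int) -> bool:
    # Mirrors the Rust check: (bits << shift) >> shift == bits, where
    # bits shifted past the 128-bit width are silently dropped.
    shift = 128 - size_bits
    truncated = ((bits << shift) & MASK_128) >> shift
    return truncated == bits

assert is_truncated(0xFF, 8)        # fits in one byte
assert not is_truncated(0x1FF, 8)   # from_bits would panic on this input
```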
LinkButton.js
import React, {Component} from 'react'; import {Link} from 'react-router'; export default class
 extends Component {

    render() {
        // Fall back to empty strings when no query string or CSS class is given.
        let queryString = this.props.location.search ? this.props.location.search : '';
        let linkClass = this.props.linkClass ? this.props.linkClass : '';
        return (
            <Link to={this.props.linkRoute + queryString}
                className={linkClass}
                onClick={this.props.onClick}>
                {this.props.children}
            </Link>
        );
    }
}
LinkButton
expression.rs
use super::address_transform::AddressTransform; use crate::debug::ModuleMemoryOffset; use anyhow::{Context, Error, Result}; use cranelift_codegen::ir::{LabelValueLoc, StackSlots, ValueLabel}; use cranelift_codegen::isa::TargetIsa; use cranelift_codegen::ValueLabelsRanges; use cranelift_wasm::get_vmctx_value_label; use gimli::{self, write, Expression, Operation, Reader, ReaderOffset, X86_64}; use more_asserts::{assert_le, assert_lt}; use std::cmp::PartialEq; use std::collections::{HashMap, HashSet}; use std::hash::{Hash, Hasher}; use std::rc::Rc; use wasmtime_environ::{DefinedFuncIndex, EntityRef}; #[derive(Debug)] pub struct FunctionFrameInfo<'a> { pub value_ranges: &'a ValueLabelsRanges, pub memory_offset: ModuleMemoryOffset, pub stack_slots: &'a StackSlots, } impl<'a> FunctionFrameInfo<'a> { fn vmctx_memory_offset(&self) -> Option<i64> { match self.memory_offset { ModuleMemoryOffset::Defined(x) => Some(x as i64), ModuleMemoryOffset::Imported(_) => { // TODO implement memory offset for imported memory None } ModuleMemoryOffset::None => None, } } } struct ExpressionWriter(write::EndianVec<gimli::RunTimeEndian>); impl ExpressionWriter { pub fn new() -> Self { let endian = gimli::RunTimeEndian::Little; let writer = write::EndianVec::new(endian); ExpressionWriter(writer) } pub fn write_op(&mut self, op: gimli::DwOp) -> write::Result<()> { self.write_u8(op.0 as u8) } pub fn write_op_reg(&mut self, reg: u16) -> write::Result<()> { if reg < 32 { self.write_u8(gimli::constants::DW_OP_reg0.0 as u8 + reg as u8) } else { self.write_op(gimli::constants::DW_OP_regx)?; self.write_uleb128(reg.into()) } } pub fn write_op_breg(&mut self, reg: u16) -> write::Result<()> { if reg < 32 { self.write_u8(gimli::constants::DW_OP_breg0.0 as u8 + reg as u8) } else { self.write_op(gimli::constants::DW_OP_bregx)?; self.write_uleb128(reg.into()) } } pub fn write_u8(&mut self, b: u8) -> write::Result<()> { write::Writer::write_u8(&mut self.0, b) } pub fn write_u32(&mut self, b: u32) -> write::Result<()> { write::Writer::write_u32(&mut self.0, b) } pub fn write_uleb128(&mut self, i: u64) -> write::Result<()> { write::Writer::write_uleb128(&mut self.0, i) } pub fn write_sleb128(&mut self, i: i64) -> write::Result<()> { write::Writer::write_sleb128(&mut self.0, i) } pub fn into_vec(self) -> Vec<u8> { self.0.into_vec() } } #[derive(Debug, Clone, PartialEq)] enum CompiledExpressionPart { // Untranslated DWARF expression. Code(Vec<u8>), // The wasm-local DWARF operator. The label points to `ValueLabel`. // The trailing field denotes that the operator was last in sequence, // and it is the DWARF location (not a pointer). Local { label: ValueLabel, trailing: bool, }, // Dereference is needed. Deref, // Jumping in the expression. Jump { conditionally: bool, target: JumpTargetMarker, }, // Floating landing pad. 
LandingPad(JumpTargetMarker),
}

#[derive(Debug, Clone, PartialEq)]
pub struct CompiledExpression {
    parts: Vec<CompiledExpressionPart>,
    need_deref: bool,
}

impl CompiledExpression {
    pub fn vmctx() -> CompiledExpression {
        CompiledExpression::from_label(get_vmctx_value_label())
    }

    pub fn from_label(label: ValueLabel) -> CompiledExpression {
        CompiledExpression {
            parts: vec![CompiledExpressionPart::Local {
                label,
                trailing: true,
            }],
            need_deref: false,
        }
    }
}

pub fn write_expr_addr(addr: u64) -> gimli::write::Expression {
    let mut w = ExpressionWriter::new();
    // Writes into the in-memory buffer cannot fail, so the Results are
    // unwrapped instead of being silently dropped.
    w.write_op(gimli::constants::DW_OP_addr).unwrap();
    for byte in addr.to_le_bytes() {
        w.write_u8(byte).unwrap();
    }
    gimli::write::Expression::raw(w.into_vec())
}

fn translate_loc(
    loc: LabelValueLoc,
    isa: &dyn TargetIsa,
    add_stack_value: bool,
) -> Result<Option<Vec<u8>>> {
    Ok(match loc {
        LabelValueLoc::Reg(r) => {
            let machine_reg = isa.map_regalloc_reg_to_dwarf(r)?;
            let mut writer = ExpressionWriter::new();
            if add_stack_value {
                writer.write_op_reg(machine_reg)?;
            } else {
                writer.write_op_breg(machine_reg)?;
                writer.write_sleb128(0)?;
            }
            Some(writer.into_vec())
        }
        LabelValueLoc::SPOffset(off) => {
            let mut writer = ExpressionWriter::new();
            writer.write_op_breg(X86_64::RSP.0)?;
            writer.write_sleb128(off)?;
            if !add_stack_value {
                writer.write_op(gimli::constants::DW_OP_deref)?;
            }
            return Ok(Some(writer.into_vec()));
        }
    })
}

fn append_memory_deref(
    buf: &mut Vec<u8>,
    frame_info: &FunctionFrameInfo,
    vmctx_loc: LabelValueLoc,
    isa: &dyn TargetIsa,
) -> Result<bool> {
    let mut writer = ExpressionWriter::new();
    // FIXME for imported memory
    match vmctx_loc {
        LabelValueLoc::Reg(r) => {
            let reg = isa.map_regalloc_reg_to_dwarf(r)?;
            writer.write_op_breg(reg)?;
            let memory_offset = match frame_info.vmctx_memory_offset() {
                Some(offset) => offset,
                None => {
                    return Ok(false);
                }
            };
            writer.write_sleb128(memory_offset)?;
        }
        LabelValueLoc::SPOffset(off) => {
            writer.write_op_breg(X86_64::RSP.0)?;
            writer.write_sleb128(off)?;
            writer.write_op(gimli::constants::DW_OP_deref)?;
            writer.write_op(gimli::constants::DW_OP_consts)?;
            let memory_offset = match frame_info.vmctx_memory_offset() {
                Some(offset) => offset,
                None => {
                    return Ok(false);
                }
            };
            writer.write_sleb128(memory_offset)?;
            writer.write_op(gimli::constants::DW_OP_plus)?;
        }
    }

    writer.write_op(gimli::constants::DW_OP_deref)?;
    writer.write_op(gimli::constants::DW_OP_swap)?;
    writer.write_op(gimli::constants::DW_OP_const4u)?;
    writer.write_u32(0xffff_ffff)?;
    writer.write_op(gimli::constants::DW_OP_and)?;
    writer.write_op(gimli::constants::DW_OP_plus)?;
    buf.extend(writer.into_vec());
    Ok(true)
}

impl CompiledExpression {
    pub fn is_simple(&self) -> bool {
        if let [CompiledExpressionPart::Code(_)] = self.parts.as_slice() {
            true
        } else {
            self.parts.is_empty()
        }
    }

    pub fn build(&self) -> Option<write::Expression> {
        if let [CompiledExpressionPart::Code(code)] = self.parts.as_slice() {
            return Some(write::Expression::raw(code.to_vec()));
        }
        // locals found, not supported
        None
    }

    pub fn build_with_locals<'a>(
        &'a self,
        scope: &'a [(u64, u64)], // wasm ranges
        addr_tr: &'a AddressTransform,
        frame_info: Option<&'a FunctionFrameInfo>,
        isa: &'a dyn TargetIsa,
    ) -> impl Iterator<Item = Result<(write::Address, u64, write::Expression)>> + 'a {
        enum BuildWithLocalsResult<'a> {
            Empty,
            Simple(
                Box<dyn Iterator<Item = (write::Address, u64)> + 'a>,
                Vec<u8>,
            ),
            Ranges(
                Box<dyn Iterator<Item = Result<(DefinedFuncIndex, usize,
usize, Vec<u8>)>> + 'a>, ), } impl Iterator for BuildWithLocalsResult<'_> { type Item = Result<(write::Address, u64, write::Expression)>; fn next(&mut self) -> Option<Self::Item> { match self { BuildWithLocalsResult::Empty => None, BuildWithLocalsResult::Simple(it, code) => it .next() .map(|(addr, len)| Ok((addr, len, write::Expression::raw(code.to_vec())))), BuildWithLocalsResult::Ranges(it) => it.next().map(|r| { r.map(|(func_index, start, end, code_buf)| { ( write::Address::Symbol { symbol: func_index.index(), addend: start as i64, }, (end - start) as u64, write::Expression::raw(code_buf), ) }) }), } } } if scope.is_empty() { return BuildWithLocalsResult::Empty; } // If it a simple DWARF code, no need in locals processing. Just translate // the scope ranges. if let [CompiledExpressionPart::Code(code)] = self.parts.as_slice() { return BuildWithLocalsResult::Simple( Box::new(scope.iter().flat_map(move |(wasm_start, wasm_end)| { addr_tr.translate_ranges(*wasm_start, *wasm_end) })), code.clone(), ); } let vmctx_label = get_vmctx_value_label(); // Some locals are present, preparing and divided ranges based on the scope // and frame_info data. let mut ranges_builder = ValueLabelRangesBuilder::new(scope, addr_tr, frame_info); for p in self.parts.iter() { match p { CompiledExpressionPart::Code(_) | CompiledExpressionPart::Jump { .. } | CompiledExpressionPart::LandingPad { .. } => (), CompiledExpressionPart::Local { label, .. } => ranges_builder.process_label(*label), CompiledExpressionPart::Deref => ranges_builder.process_label(vmctx_label), } } if self.need_deref { ranges_builder.process_label(vmctx_label); } let ranges = ranges_builder.into_ranges(); return BuildWithLocalsResult::Ranges(Box::new( ranges .into_iter() .map( move |CachedValueLabelRange { func_index, start, end, label_location, }| { // build expression let mut code_buf = Vec::new(); let mut jump_positions = Vec::new(); let mut landing_positions = HashMap::new(); macro_rules! deref { () => { if let (Some(vmctx_loc), Some(frame_info)) = (label_location.get(&vmctx_label), frame_info) { if !append_memory_deref( &mut code_buf, frame_info, *vmctx_loc, isa, )? { return Ok(None); } } else { return Ok(None); } }; } for part in &self.parts { match part { CompiledExpressionPart::Code(c) => { code_buf.extend_from_slice(c.as_slice()) } CompiledExpressionPart::LandingPad(marker) => { landing_positions.insert(marker.clone(), code_buf.len()); } CompiledExpressionPart::Jump { conditionally, target, } => { code_buf.push( match conditionally { true => gimli::constants::DW_OP_bra, false => gimli::constants::DW_OP_skip, } .0 as u8, ); code_buf.push(!0); code_buf.push(!0); // these will be relocated below jump_positions.push((target.clone(), code_buf.len())); } CompiledExpressionPart::Local { label, trailing } => { let loc = *label_location.get(&label).context("label_location")?; if let Some(expr) = translate_loc(loc, isa, *trailing)? { code_buf.extend_from_slice(&expr) } else { return Ok(None); } } CompiledExpressionPart::Deref => deref!(), } } if self.need_deref { deref!(); } for (marker, new_from) in jump_positions { // relocate jump targets let new_to = landing_positions[&marker]; let new_diff = new_to as isize - new_from as isize; // FIXME: use encoding? LittleEndian for now... 
code_buf[new_from - 2..new_from] .copy_from_slice(&(new_diff as i16).to_le_bytes()); } Ok(Some((func_index, start, end, code_buf))) }, ) .filter_map(Result::transpose), )); } } fn is_old_expression_format(buf: &[u8]) -> bool { // Heuristic to detect old variable expression format without DW_OP_fbreg: // DW_OP_plus_uconst op must be present, but not DW_OP_fbreg. if buf.contains(&(gimli::constants::DW_OP_fbreg.0 as u8)) { // Stop check if DW_OP_fbreg exist. return false; } buf.contains(&(gimli::constants::DW_OP_plus_uconst.0 as u8)) } pub fn compile_expression<R>( expr: &Expression<R>, encoding: gimli::Encoding, frame_base: Option<&CompiledExpression>, ) -> Result<Option<CompiledExpression>, Error> where R: Reader, { // Bail when `frame_base` is complicated. if let Some(expr) = frame_base { if expr.parts.iter().any(|p| match p { CompiledExpressionPart::Jump { .. } => true, _ => false, }) { return Ok(None); } } // jump_targets key is offset in buf starting from the end // (see also `unread_bytes` below) let mut jump_targets: HashMap<u64, JumpTargetMarker> = HashMap::new(); let mut pc = expr.0.clone(); let buf = expr.0.to_slice()?; let mut parts = Vec::new(); macro_rules! push { ($part:expr) => {{ let part = $part; if let (CompiledExpressionPart::Code(cc2), Some(CompiledExpressionPart::Code(cc1))) = (&part, parts.last_mut()) { cc1.extend_from_slice(cc2); } else { parts.push(part) } }}; } let mut need_deref = false; if is_old_expression_format(&buf) && frame_base.is_some() { // Still supporting old DWARF variable expressions without fbreg. parts.extend_from_slice(&frame_base.unwrap().parts); if let Some(CompiledExpressionPart::Local { trailing, .. }) = parts.last_mut() { *trailing = false; } need_deref = frame_base.unwrap().need_deref; } let mut code_chunk = Vec::new(); macro_rules! flush_code_chunk { () => { if !code_chunk.is_empty() { push!(CompiledExpressionPart::Code(code_chunk)); code_chunk = Vec::new(); let _ = code_chunk; // suppresses warning for final flush } }; } // Find all landing pads by scanning bytes, do not care about // false location at this moment. // Looks hacky but it is fast; does not need to be really exact. if buf.len() > 2 { for i in 0..buf.len() - 2 { let op = buf[i]; if op == gimli::constants::DW_OP_bra.0 || op == gimli::constants::DW_OP_skip.0 { // TODO fix for big-endian let offset = i16::from_le_bytes([buf[i + 1], buf[i + 2]]); let origin = i + 3; // Discarding out-of-bounds jumps (also some of falsely detected ops) if (offset >= 0 && offset as usize + origin <= buf.len()) || (offset < 0 && -offset as usize <= origin) { let target = buf.len() as isize - origin as isize - offset as isize; jump_targets.insert(target as u64, JumpTargetMarker::new()); } } } } while !pc.is_empty() { let unread_bytes = pc.len().into_u64(); if let Some(marker) = jump_targets.get(&unread_bytes) { flush_code_chunk!(); parts.push(CompiledExpressionPart::LandingPad(marker.clone())); } need_deref = true; let pos = pc.offset_from(&expr.0).into_u64() as usize; let op = Operation::parse(&mut pc, encoding)?; match op { Operation::FrameOffset { offset } => { // Expand DW_OP_fbreg into frame location and DW_OP_plus_uconst. if frame_base.is_some() { // Add frame base expressions. flush_code_chunk!(); parts.extend_from_slice(&frame_base.unwrap().parts); } if let Some(CompiledExpressionPart::Local { trailing, .. }) = parts.last_mut() { // Reset local trailing flag. *trailing = false; } // Append DW_OP_plus_uconst part. 
let mut writer = ExpressionWriter::new(); writer.write_op(gimli::constants::DW_OP_plus_uconst)?; writer.write_uleb128(offset as u64)?; code_chunk.extend(writer.into_vec()); continue; } Operation::Drop { .. } | Operation::Pick { .. } | Operation::Swap { .. } | Operation::Rot { .. } | Operation::Nop { .. } | Operation::UnsignedConstant { .. } | Operation::SignedConstant { .. } | Operation::ConstantIndex { .. } | Operation::PlusConstant { .. } | Operation::Abs { .. } | Operation::And { .. } | Operation::Or { .. } | Operation::Xor { .. } | Operation::Shl { .. } | Operation::Plus { .. } | Operation::Minus { .. } | Operation::Div { .. } | Operation::Mod { .. } | Operation::Mul { .. } | Operation::Neg { .. } | Operation::Not { .. } | Operation::Lt { .. } | Operation::Gt { .. } | Operation::Le { .. } | Operation::Ge { .. } | Operation::Eq { .. } | Operation::Ne { .. } | Operation::TypedLiteral { .. } | Operation::Convert { .. } | Operation::Reinterpret { .. } | Operation::Piece { .. } => (), Operation::Bra { target } | Operation::Skip { target } => { flush_code_chunk!(); let arc_to = (pc.len().into_u64() as isize - target as isize) as u64; let marker = match jump_targets.get(&arc_to) { Some(m) => m.clone(), None => { // Marker not found: probably out of bounds. return Ok(None); } }; push!(CompiledExpressionPart::Jump { conditionally: match op { Operation::Bra { .. } => true, _ => false, }, target: marker, }); continue; } Operation::StackValue => { need_deref = false; // Find extra stack_value, that follow wasm-local operators, // and mark such locals with special flag. if let (Some(CompiledExpressionPart::Local { trailing, .. }), true) = (parts.last_mut(), code_chunk.is_empty()) { *trailing = true; continue; } } Operation::Deref { .. } => { flush_code_chunk!(); push!(CompiledExpressionPart::Deref); // Don't re-enter the loop here (i.e. continue), because the // DW_OP_deref still needs to be kept. } Operation::WasmLocal { index } => { flush_code_chunk!(); let label = ValueLabel::from_u32(index as u32); push!(CompiledExpressionPart::Local { label, trailing: false, }); continue; } Operation::Shr { .. } | Operation::Shra { .. } => { // Insert value normalisation part. // The semantic value is 32 bits (TODO: check unit) // but the target architecture is 64-bits. So we'll // clean out the upper 32 bits (in a sign-correct way) // to avoid contamination of the result with randomness. let mut writer = ExpressionWriter::new(); writer.write_op(gimli::constants::DW_OP_plus_uconst)?; writer.write_uleb128(32)?; // increase shift amount writer.write_op(gimli::constants::DW_OP_swap)?; writer.write_op(gimli::constants::DW_OP_const1u)?; writer.write_u8(32)?; writer.write_op(gimli::constants::DW_OP_shl)?; writer.write_op(gimli::constants::DW_OP_swap)?; code_chunk.extend(writer.into_vec()); // Don't re-enter the loop here (i.e. continue), because the // DW_OP_shr* still needs to be kept. } Operation::Address { .. } | Operation::AddressIndex { .. } | Operation::Call { .. } | Operation::Register { .. } | Operation::RegisterOffset { .. } | Operation::CallFrameCFA | Operation::PushObjectAddress | Operation::TLS | Operation::ImplicitValue { .. } | Operation::ImplicitPointer { .. } | Operation::EntryValue { .. } | Operation::ParameterRef { .. 
} => { return Ok(None); } Operation::WasmGlobal { index: _ } | Operation::WasmStack { index: _ } => { // TODO support those two return Ok(None); } } let chunk = &buf[pos..pc.offset_from(&expr.0).into_u64() as usize]; code_chunk.extend_from_slice(chunk); } flush_code_chunk!(); if let Some(marker) = jump_targets.get(&0) { parts.push(CompiledExpressionPart::LandingPad(marker.clone())); } Ok(Some(CompiledExpression { parts, need_deref })) } #[derive(Debug, Clone)] struct CachedValueLabelRange { func_index: DefinedFuncIndex, start: usize, end: usize, label_location: HashMap<ValueLabel, LabelValueLoc>, } struct ValueLabelRangesBuilder<'a, 'b> { ranges: Vec<CachedValueLabelRange>, frame_info: Option<&'a FunctionFrameInfo<'b>>, processed_labels: HashSet<ValueLabel>, } impl<'a, 'b> ValueLabelRangesBuilder<'a, 'b> { pub fn new( scope: &[(u64, u64)], // wasm ranges addr_tr: &'a AddressTransform, frame_info: Option<&'a FunctionFrameInfo<'b>>, ) -> Self { let mut ranges = Vec::new(); for (wasm_start, wasm_end) in scope { if let Some((func_index, tr)) = addr_tr.translate_ranges_raw(*wasm_start, *wasm_end) { ranges.extend(tr.into_iter().map(|(start, end)| CachedValueLabelRange { func_index, start, end, label_location: HashMap::new(), })); } } ranges.sort_unstable_by(|a, b| a.start.cmp(&b.start)); ValueLabelRangesBuilder { ranges, frame_info, processed_labels: HashSet::new(), } } fn process_label(&mut self, label: ValueLabel) { if self.processed_labels.contains(&label) { return; } self.processed_labels.insert(label); let value_ranges = match self.frame_info.and_then(|fi| fi.value_ranges.get(&label)) { Some(value_ranges) => value_ranges, None => { return; } }; let ranges = &mut self.ranges; for value_range in value_ranges { let range_start = value_range.start as usize; let range_end = value_range.end as usize; let loc = value_range.loc; if range_start == range_end { continue; } assert_lt!(range_start, range_end); // Find acceptable scope of ranges to intersect with. let i = match ranges.binary_search_by(|s| s.start.cmp(&range_start)) { Ok(i) => i, Err(i) => { if i > 0 && range_start < ranges[i - 1].end { i - 1 } else { i } } }; let j = match ranges.binary_search_by(|s| s.start.cmp(&range_end)) { Ok(i) | Err(i) => i, }; // Starting from the end, intersect (range_start..range_end) with // self.ranges array. for i in (i..j).rev() { if range_end <= ranges[i].start || ranges[i].end <= range_start { continue; } if range_end < ranges[i].end { // Cutting some of the range from the end. let mut tail = ranges[i].clone(); ranges[i].end = range_end; tail.start = range_end; ranges.insert(i + 1, tail); } assert_le!(ranges[i].end, range_end); if range_start <= ranges[i].start { ranges[i].label_location.insert(label, loc); continue; } // Cutting some of the range from the start. let mut tail = ranges[i].clone(); ranges[i].end = range_start; tail.start = range_start; tail.label_location.insert(label, loc); ranges.insert(i + 1, tail); } } } pub fn into_ranges(self) -> impl Iterator<Item = CachedValueLabelRange> { // Ranges with not-enough labels are discarded. let processed_labels_len = self.processed_labels.len(); self.ranges .into_iter() .filter(move |r| r.label_location.len() == processed_labels_len) } } /// Marker for tracking incoming jumps. /// Different when created new, and the same when cloned. #[derive(Clone, Eq)] struct JumpTargetMarker(Rc<u32>); impl JumpTargetMarker { fn new() -> JumpTargetMarker { // Create somewhat unique hash data -- using part of // the pointer of the RcBox. 
let mut rc = Rc::new(0); let hash_data = rc.as_ref() as *const u32 as usize as u32; *Rc::get_mut(&mut rc).unwrap() = hash_data; JumpTargetMarker(rc) } } impl PartialEq for JumpTargetMarker { fn eq(&self, other: &JumpTargetMarker) -> bool { Rc::ptr_eq(&self.0, &other.0) } } impl Hash for JumpTargetMarker { fn hash<H: Hasher>(&self, hasher: &mut H) { hasher.write_u32(*self.0); } } impl std::fmt::Debug for JumpTargetMarker { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::result::Result<(), std::fmt::Error> { write!( f, "JumpMarker<{:08x}>", self.0.as_ref() as *const u32 as usize ) } } #[cfg(test)] mod tests { use super::{ compile_expression, AddressTransform, CompiledExpression, CompiledExpressionPart, FunctionFrameInfo, JumpTargetMarker, ValueLabel, ValueLabelsRanges, }; use crate::CompiledFunction; use gimli::{self, constants, Encoding, EndianSlice, Expression, RunTimeEndian}; use wasmtime_environ::FilePos; macro_rules! dw_op { (DW_OP_WASM_location) => { 0xed }; ($i:literal) => { $i }; ($d:ident) => { constants::$d.0 as u8 }; ($e:expr) => { $e as u8 }; } macro_rules! expression { ($($t:tt),*) => { Expression(EndianSlice::new( &[$(dw_op!($t)),*], RunTimeEndian::Little, )) } } fn find_jump_targets<'a>(ce: &'a CompiledExpression) -> Vec<&'a JumpTargetMarker> { ce.parts .iter() .filter_map(|p| { if let CompiledExpressionPart::LandingPad(t) = p { Some(t) } else { None } }) .collect::<Vec<_>>() } static DWARF_ENCODING: Encoding = Encoding { address_size: 4, format: gimli::Format::Dwarf32, version: 4, }; #[test] fn test_debug_expression_jump_target() { let m1 = JumpTargetMarker::new(); let m2 = JumpTargetMarker::new(); assert!(m1 != m2); assert!(m1 == m1.clone()); // Internal hash_data test (theoretically can fail intermittently). assert!(m1.0 != m2.0); } #[test] fn test_debug_parse_expressions() { use cranelift_entity::EntityRef; let (val1, val3, val20) = (ValueLabel::new(1), ValueLabel::new(3), ValueLabel::new(20)); let e = expression!(DW_OP_WASM_location, 0x0, 20, DW_OP_stack_value); let ce = compile_expression(&e, DWARF_ENCODING, None) .expect("non-error") .expect("expression"); assert_eq!( ce, CompiledExpression { parts: vec![CompiledExpressionPart::Local { label: val20, trailing: true }], need_deref: false, } ); let e = expression!( DW_OP_WASM_location, 0x0, 1, DW_OP_plus_uconst, 0x10, DW_OP_stack_value ); let ce = compile_expression(&e, DWARF_ENCODING, None) .expect("non-error") .expect("expression"); assert_eq!( ce, CompiledExpression { parts: vec![ CompiledExpressionPart::Local { label: val1, trailing: false }, CompiledExpressionPart::Code(vec![35, 16, 159]) ], need_deref: false, } ); let e = expression!(DW_OP_WASM_location, 0x0, 3, DW_OP_stack_value); let fe = compile_expression(&e, DWARF_ENCODING, None).expect("non-error"); let e = expression!(DW_OP_fbreg, 0x12); let ce = compile_expression(&e, DWARF_ENCODING, fe.as_ref()) .expect("non-error") .expect("expression"); assert_eq!( ce, CompiledExpression { parts: vec![ CompiledExpressionPart::Local { label: val3, trailing: false }, CompiledExpressionPart::Code(vec![35, 18]) ], need_deref: true, } ); let e = expression!( DW_OP_WASM_location, 0x0, 1, DW_OP_plus_uconst, 5, DW_OP_deref, DW_OP_stack_value ); let ce = compile_expression(&e, DWARF_ENCODING, None) .expect("non-error") .expect("expression"); assert_eq!( ce, CompiledExpression { parts: vec![ CompiledExpressionPart::Local { label: val1, trailing: false }, CompiledExpressionPart::Code(vec![35, 5]), CompiledExpressionPart::Deref, CompiledExpressionPart::Code(vec![6, 159]) ], 
need_deref: false, } ); let e = expression!( DW_OP_WASM_location, 0x0, 1, DW_OP_lit16, DW_OP_shra, DW_OP_stack_value ); let ce = compile_expression(&e, DWARF_ENCODING, None) .expect("non-error") .expect("expression"); assert_eq!( ce, CompiledExpression { parts: vec![ CompiledExpressionPart::Local { label: val1, trailing: false }, CompiledExpressionPart::Code(vec![64, 35, 32, 22, 8, 32, 36, 22, 38, 159]) ], need_deref: false, } ); let e = expression!( DW_OP_lit1, DW_OP_dup, DW_OP_WASM_location, 0x0, 1, DW_OP_and, DW_OP_bra, 5, 0, // --> pointer DW_OP_swap, DW_OP_shr, DW_OP_skip, 2, 0, // --> done // pointer: DW_OP_plus, DW_OP_deref, // done: DW_OP_stack_value ); let ce = compile_expression(&e, DWARF_ENCODING, None) .expect("non-error") .expect("expression"); let targets = find_jump_targets(&ce); assert_eq!(targets.len(), 2); assert_eq!( ce, CompiledExpression { parts: vec![ CompiledExpressionPart::Code(vec![49, 18]), CompiledExpressionPart::Local { label: val1, trailing: false }, CompiledExpressionPart::Code(vec![26]), CompiledExpressionPart::Jump { conditionally: true, target: targets[0].clone(), }, CompiledExpressionPart::Code(vec![22, 35, 32, 22, 8, 32, 36, 22, 37]), CompiledExpressionPart::Jump { conditionally: false, target: targets[1].clone(), }, CompiledExpressionPart::LandingPad(targets[0].clone()), // capture from CompiledExpressionPart::Code(vec![34]), CompiledExpressionPart::Deref, CompiledExpressionPart::Code(vec![6]), CompiledExpressionPart::LandingPad(targets[1].clone()), // capture to CompiledExpressionPart::Code(vec![159]) ], need_deref: false, } ); let e = expression!( DW_OP_lit1, DW_OP_dup, DW_OP_bra, 2, 0, // --> target DW_OP_deref, DW_OP_lit0, // target: DW_OP_stack_value ); let ce = compile_expression(&e, DWARF_ENCODING, None) .expect("non-error") .expect("expression"); let targets = find_jump_targets(&ce); assert_eq!(targets.len(), 1); assert_eq!( ce, CompiledExpression { parts: vec![ CompiledExpressionPart::Code(vec![49, 18]), CompiledExpressionPart::Jump { conditionally: true, target: targets[0].clone(), }, CompiledExpressionPart::Deref, CompiledExpressionPart::Code(vec![6, 48]), CompiledExpressionPart::LandingPad(targets[0].clone()), // capture to CompiledExpressionPart::Code(vec![159]) ], need_deref: false, } ); let e = expression!( DW_OP_lit1, /* loop */ DW_OP_dup, DW_OP_lit25, DW_OP_ge, DW_OP_bra, 5, 0, // --> done DW_OP_plus_uconst, 1, DW_OP_skip, (-11 as i8), (!0), // --> loop /* done */ DW_OP_stack_value ); let ce = compile_expression(&e, DWARF_ENCODING, None) .expect("non-error") .expect("expression"); let targets = find_jump_targets(&ce); assert_eq!(targets.len(), 2); assert_eq!( ce, CompiledExpression { parts: vec![ CompiledExpressionPart::Code(vec![49]), CompiledExpressionPart::LandingPad(targets[0].clone()), CompiledExpressionPart::Code(vec![18, 73, 42]), CompiledExpressionPart::Jump { conditionally: true, target: targets[1].clone(), }, CompiledExpressionPart::Code(vec![35, 1]), CompiledExpressionPart::Jump { conditionally: false, target: targets[0].clone(), }, CompiledExpressionPart::LandingPad(targets[1].clone()), CompiledExpressionPart::Code(vec![159]) ], need_deref: false, } ); let e = expression!(DW_OP_WASM_location, 0x0, 1, DW_OP_plus_uconst, 5); let ce = compile_expression(&e, DWARF_ENCODING, None) .expect("non-error") .expect("expression"); assert_eq!( ce, CompiledExpression { parts: vec![ CompiledExpressionPart::Local { label: val1, trailing: false }, CompiledExpressionPart::Code(vec![35, 5]) ], need_deref: true, } ); } fn 
create_mock_address_transform() -> AddressTransform { use crate::FunctionAddressMap; use cranelift_entity::PrimaryMap; use wasmtime_environ::InstructionAddressMap; use wasmtime_environ::WasmFileInfo; let mut module_map = PrimaryMap::new(); let code_section_offset: u32 = 100; module_map.push(CompiledFunction { address_map: FunctionAddressMap { instructions: vec![ InstructionAddressMap { srcloc: FilePos::new(code_section_offset + 12), code_offset: 5, }, InstructionAddressMap { srcloc: FilePos::default(), code_offset: 8, }, InstructionAddressMap { srcloc: FilePos::new(code_section_offset + 17), code_offset: 15, }, InstructionAddressMap { srcloc: FilePos::default(), code_offset: 23, }, ] .into(), start_srcloc: FilePos::new(code_section_offset + 10), end_srcloc: FilePos::new(code_section_offset + 20), body_offset: 0, body_len: 30, }, ..Default::default() }); let fi = WasmFileInfo { code_section_offset: code_section_offset.into(), funcs: Vec::new(), imported_func_count: 0, path: None, }; AddressTransform::new(&module_map, &fi) } fn create_mock_value_ranges() -> (ValueLabelsRanges, (ValueLabel, ValueLabel, ValueLabel)) { use cranelift_codegen::ir::LabelValueLoc; use cranelift_codegen::ValueLocRange; use cranelift_entity::EntityRef; use std::collections::HashMap; let mut value_ranges = HashMap::new(); let value_0 = ValueLabel::new(0); let value_1 = ValueLabel::new(1); let value_2 = ValueLabel::new(2); value_ranges.insert( value_0, vec![ValueLocRange { loc: LabelValueLoc::SPOffset(0), start: 0, end: 25, }], ); value_ranges.insert( value_1, vec![ValueLocRange { loc: LabelValueLoc::SPOffset(0), start: 5,
); value_ranges.insert( value_2, vec![ ValueLocRange { loc: LabelValueLoc::SPOffset(0), start: 0, end: 10, }, ValueLocRange { loc: LabelValueLoc::SPOffset(0), start: 20, end: 30, }, ], ); (value_ranges, (value_0, value_1, value_2)) } #[test] fn test_debug_value_range_builder() { use super::ValueLabelRangesBuilder; use crate::debug::ModuleMemoryOffset; use cranelift_codegen::ir::StackSlots; use wasmtime_environ::{DefinedFuncIndex, EntityRef}; let addr_tr = create_mock_address_transform(); let stack_slots = StackSlots::new(); let (value_ranges, value_labels) = create_mock_value_ranges(); let fi = FunctionFrameInfo { memory_offset: ModuleMemoryOffset::None, stack_slots: &stack_slots, value_ranges: &value_ranges, }; // No value labels, testing if entire function range coming through. let builder = ValueLabelRangesBuilder::new(&[(10, 20)], &addr_tr, Some(&fi)); let ranges = builder.into_ranges().collect::<Vec<_>>(); assert_eq!(ranges.len(), 1); assert_eq!(ranges[0].func_index, DefinedFuncIndex::new(0)); assert_eq!(ranges[0].start, 0); assert_eq!(ranges[0].end, 30); // Two labels ([email protected] and [email protected]), their common lifetime intersect at 5..25. let mut builder = ValueLabelRangesBuilder::new(&[(10, 20)], &addr_tr, Some(&fi)); builder.process_label(value_labels.0); builder.process_label(value_labels.1); let ranges = builder.into_ranges().collect::<Vec<_>>(); assert_eq!(ranges.len(), 1); assert_eq!(ranges[0].start, 5); assert_eq!(ranges[0].end, 25); // Adds val2 with complex lifetime @0..10 and @20..30 to the previous test, and // also narrows range. let mut builder = ValueLabelRangesBuilder::new(&[(11, 17)], &addr_tr, Some(&fi)); builder.process_label(value_labels.0); builder.process_label(value_labels.1); builder.process_label(value_labels.2); let ranges = builder.into_ranges().collect::<Vec<_>>(); // Result is two ranges @5..10 and @20..23 assert_eq!(ranges.len(), 2); assert_eq!(ranges[0].start, 5); assert_eq!(ranges[0].end, 10); assert_eq!(ranges[1].start, 20); assert_eq!(ranges[1].end, 23); } }
end: 30, }],
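The jump fix-up at the end of `build_with_locals` above records, for every emitted `DW_OP_bra`/`DW_OP_skip`, the position just past its two `0xff` placeholder bytes, then patches those bytes with the signed 16-bit little-endian distance to the matching landing pad. A small Python sketch of that relocation step, following the same arithmetic as the Rust code — the marker name, buffer contents, and offsets here are illustrative only:

```python
import struct

def relocate_jumps(code_buf: bytearray, jump_positions, landing_positions):
    # jump_positions: (marker, offset just past the two placeholder bytes)
    # landing_positions: marker -> offset of the landing pad in code_buf
    for marker, new_from in jump_positions:
        new_to = landing_positions[marker]
        new_diff = new_to - new_from
        # Overwrite the placeholders with the little-endian i16 offset.
        code_buf[new_from - 2:new_from] = struct.pack("<h", new_diff)

# DW_OP_skip (0x2f) + 2 placeholder bytes, a DW_OP_nop (0x96) to be skipped,
# then the landing pad right before DW_OP_stack_value (0x9f) at offset 4.
buf = bytearray([0x2F, 0xFF, 0xFF, 0x96, 0x9F])
relocate_jumps(buf, [("pad", 3)], {"pad": 4})
assert buf == bytearray([0x2F, 0x01, 0x00, 0x96, 0x9F])
```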
game1.py
import cv2
import random
import math
import numpy as np
import os
import sys

from jangjorim_client import resource_path

# constants
threshold = 50
interpol = 0.7
velocity = 0

# target color
target_color = {
    "blue" : (255,0,0),
    "green" : (0,255,0),
    "red" : (0,0,255),
    "yellow" : (0,255,255),
    "origin" : (255,255,255)
}

# game explanation
explain = "Catch 5 stars in 10 seconds"

# target shape
img_rate = 0.8
star_img = cv2.imread(resource_path("jangjorim_games/image/Frame 137.png"), cv2.IMREAD_UNCHANGED)
star_img = cv2.resize(star_img, dsize=(int(63*img_rate),int(60*img_rate)))


def distance(hand, target):
    # Distance from the closer of the two hands to the target.
    dists = []
    for i in range(2):
        dists.append(math.sqrt((hand[i][0]-target[0])**2 + (hand[i][1]-target[1])**2))
    return min(dists)

def touch_target(img, hand, elbow, target, draw_hand):
    touched = False
    # Extrapolate past the wrist along the elbow-to-wrist direction to
    # approximate the palm position.
    left_hand = (((1+interpol)*hand[0][1]-interpol*elbow[0][1]).astype(np.int32),
                ((1+interpol)*hand[0][0]-interpol*elbow[0][0]).astype(np.int32))
    right_hand = (((1+interpol)*hand[1][1]-interpol*elbow[1][1]).astype(np.int32),
                ((1+interpol)*hand[1][0]-interpol*elbow[1][0]).astype(np.int32))
    if draw_hand:
        img = cv2.circle(img, left_hand, radius = 15, color=(0,255,0), thickness=-1)
        img = cv2.circle(img, right_hand, radius = 15, color=(0,255,0), thickness=-1)

    if target is not None:
        if distance([left_hand,right_hand], target) < threshold:
            target = None
            touched = True

    return img, target, touched

def random_target(img, target, target_shape = "star", color = "blue"):
    if target is None:
        target = (random.choice([random.randint(25,60),random.randint(img.shape[1]-60,img.shape[1]-25)]),random.randint(100,img.shape[0]-25))
    else:
        target = (target[0], target[1] + velocity)
        if target[1] > img.shape[0] - 30:
            target = None
            return img, target

    if target_shape == "circle":
        img = cv2.circle(img, target, radius = 15, color=target_color[color], thickness = -1)
    elif target_shape == "star":
        img = image_overlapping(img, star_img, target, target_color[color])

    return img, target

def image_overlapping(screen, img, pos, color):
    # Clamp the sprite window so it stays fully inside the screen.
    x_size, y_size, _ = img.shape
    x1, x2 = pos[1], pos[1]+x_size
    y1, y2 = pos[0], pos[0]+y_size

    if y2 >= screen.shape[1]:
        y2 = screen.shape[1]-1
        y1 = y2 - y_size
    if y1 < 0:
        y1 = 0
        y2 = y1 + y_size
    if x2 >= screen.shape[0]:
        x2 = screen.shape[0]-1
        x1 = x2 - x_size
    if x1 < 0:
        x1 = 0
alpha_img = img[:,:,3]/255.0 alpha_screen = 1- alpha_img for c in range(3): screen[x1:x2, y1:y2, c] = alpha_img * img[:,:,c]* color[c]/255.0 + alpha_screen * screen[x1:x2, y1:y2, c] return screen # def image_overlapping(screen, img, pos, color): # x_size, y_size, _ = img.shape # x1, x2 = pos[0]-int(x_size/2), pos[0]+x_size-int(x_size/2) # y1, y2 = pos[1]-int(y_size/2), pos[1]+y_size-int(y_size/2) # alpha_img = img[:,:,3]/255.0 # alpha_screen = 1- alpha_img # for c in range(3): # screen[y1:y2, x1:x2, c] = alpha_img * img[:,:,c] * color[c]/255.0 + alpha_screen * screen[y1:y2, x1:x2, c] # return screen
x2 = x1 + x_size
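`touch_target` above does not use the detected wrist point directly: it extrapolates past it along the elbow-to-wrist direction, since `(1 + interpol) * hand - interpol * elbow` equals `hand + interpol * (hand - elbow)` — with `interpol = 0.7`, a point 70% of the forearm length beyond the wrist, roughly where the palm is. A small numeric check of that identity (the coordinates are made up):

```python
import numpy as np

interpol = 0.7
elbow = np.array([100.0, 200.0])
hand = np.array([120.0, 240.0])

# (1 + k) * hand - k * elbow == hand + k * (hand - elbow)
extrapolated = (1 + interpol) * hand - interpol * elbow
assert np.allclose(extrapolated, hand + interpol * (hand - elbow))
print(extrapolated)  # [134. 268.]
```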
model_company_directory_account_info.go
/* * RingCentral Connect Platform API Explorer * * <p>This is a beta interactive API explorer for the RingCentral Connect Platform. To use this service, you will need to have an account with the proper credentials to generate an OAuth2 access token.</p><p><h2>Quick Start</h2></p><ol><li>1) Go to <b>Authentication > /oauth/token</b></li><li>2) Enter <b>app_key, app_secret, username, password</b> fields and then click \"Try it out!\"</li><li>3) Upon success, your access_token is loaded and you can access any form requiring authorization.</li></ol><h2>Links</h2><ul><li><a href=\"https://github.com/ringcentral\" target=\"_blank\">RingCentral SDKs on Github</a></li><li><a href=\"mailto:[email protected]\">RingCentral Developer Support Email</a></li></ul>
package ringcentral type CompanyDirectoryAccountInfo struct { // Internal identifier of an account Id string `json:"id,omitempty"` }
* * API version: 1.0 * Generated by: OpenAPI Generator (https://openapi-generator.tech) */
indexes.py
from abc import ABC, abstractmethod from pathlib import Path from virtool_workflow.data_model import Index from virtool_workflow.data_model.files import VirtoolFileFormat
class AbstractIndexProvider(ABC): @abstractmethod async def get(self) -> Index: """Get the current index.""" ... @abstractmethod async def upload(self, path: Path, format: VirtoolFileFormat) -> Path: """Upload a file associated with the index.""" ... @abstractmethod async def download(self, target_path: Path, *names) -> Path: """Download files associated with the index.""" ... @abstractmethod async def finalize(self): """Mark that the index associated with the current job has a json representation of the reference available.""" ... def __await__(self): return self.get().__await__()
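Because `AbstractIndexProvider.__await__` above delegates to `get()`, any concrete provider can be awaited directly. A toy in-memory implementation, only to illustrate the contract — `StaticIndexProvider` and its no-op upload/download are hypothetical, not part of the library:

```python
class StaticIndexProvider(AbstractIndexProvider):
    """Hypothetical provider that serves a pre-built Index object."""

    def __init__(self, index: Index):
        self._index = index

    async def get(self) -> Index:
        return self._index

    async def upload(self, path: Path, format: VirtoolFileFormat) -> Path:
        return path  # no-op in this sketch

    async def download(self, target_path: Path, *names) -> Path:
        return target_path  # no-op in this sketch

    async def finalize(self):
        pass

# Inside a coroutine, `await provider` and `await provider.get()` are
# equivalent:
#     index = await StaticIndexProvider(index)
```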
fields.service.ts
import { NewFieldsDto } from "./../dto/new-fields.dto";
import { FieldsRepository } from "./../repository/fields.repository";
import { GenericService } from "../../commons/services/generic.service";
import { Fields } from "../entity/fields.entity";
import { UpdateFieldsDto } from "../dto/update-fields.dto";
import { Injectable, Inject, forwardRef } from "@nestjs/common";

@Injectable()
export class FieldsService extends GenericService<
  Fields,
  FieldsRepository,
  NewFieldsDto,
  UpdateFieldsDto
> {
  constructor(
    @Inject(forwardRef(() => FieldsRepository))
    public readonly fieldsRepository: FieldsRepository,
    @Inject(forwardRef(() => AttributesRepository))
    public readonly attributesRepository: AttributesRepository
  ) {
    super(fieldsRepository, "Fields");
  }

  setNeededFieldsOnChildren(clientId: string, item: Fields) {
    item?.attributes?.forEach(f => f.clientId = clientId);
  }

  public async validateParent(clientId: string, id: string): Promise<boolean> {
    return await this.attributesRepository.findOne({ where: { clientId: clientId, id: id } }) != null;
  }

  protected getRelations(): Array<string> {
    return ["attributes", "events"];
  }
}
import { AttributesRepository } from './../repository/attributes.repository';
basic-spec.ts
import test from "ava"; import { marbles } from "rxjs-marbles/ava";
test( "should support marble tests without values", marbles((m, t) => { t.plan(2); const source = m.hot(" --^-a-b-c-|"); const subs = " ^-------!"; const expected = m.cold(" --b-c-d-|"); const destination = source.pipe( map(value => String.fromCharCode(value.charCodeAt(0) + 1)) ); m.expect(destination).toBeObservable(expected); m.expect(source).toHaveSubscriptions(subs); }) ); test( "should support marble tests with values", marbles((m, t) => { t.plan(2); const inputs = { a: 1, b: 2, c: 3 }; const outputs = { x: 2, y: 3, z: 4 }; const source = m.hot(" --^-a-b-c-|", inputs); const subs = " ^-------!"; const expected = m.cold(" --x-y-z-|", outputs); const destination = source.pipe(map(value => value + 1)); m.expect(destination).toBeObservable(expected); m.expect(source).toHaveSubscriptions(subs); }) ); test( "should support marble tests with errors", marbles((m, t) => { t.plan(2); const source = m.hot(" --^-a-b-c-#"); const subs = " ^-------!"; const expected = m.cold(" --a-b-c-#"); const destination = source; m.expect(destination).toBeObservable(expected); m.expect(source).toHaveSubscriptions(subs); }) ); test( "should support marble tests with explicit errors", marbles((m, t) => { t.plan(2); const inputs = { a: 1, b: 2, c: 3 }; const outputs = { x: 2, y: 3, z: 4 }; const source = m.hot(" --^-a-b-c-#", inputs, new Error("Boom!")); const subs = " ^-------!"; const expected = m.cold(" --x-y-z-#", outputs, new Error("Boom!")); const destination = source.pipe(map(value => value + 1)); m.expect(destination).toBeObservable(expected); m.expect(source).toHaveSubscriptions(subs); }) );
import { map } from "rxjs/operators";
formdesigner.py
import json import logging from django.conf import settings from django.contrib import messages from django.http import Http404, HttpResponse, HttpResponseBadRequest from django.shortcuts import render from django.urls import reverse from django.utils.translation import ugettext as _ from django.views.decorators.http import require_GET from couchdbkit.exceptions import ResourceConflict from dimagi.utils.logging import notify_exception from corehq import privileges, toggles from corehq.apps.accounting.utils import domain_has_privilege from corehq.apps.analytics.tasks import ( HUBSPOT_FORM_BUILDER_FORM_ID, send_hubspot_form, ) from corehq.apps.app_manager import add_ons from corehq.apps.app_manager.app_schemas.casedb_schema import get_casedb_schema from corehq.apps.app_manager.app_schemas.session_schema import ( get_session_schema, ) from corehq.apps.app_manager.const import ( SCHEDULE_CURRENT_VISIT_NUMBER, SCHEDULE_GLOBAL_NEXT_VISIT_DATE, SCHEDULE_NEXT_DUE, SCHEDULE_UNSCHEDULED_VISIT, ) from corehq.apps.app_manager.dbaccessors import get_app from corehq.apps.app_manager.decorators import require_can_edit_apps from corehq.apps.app_manager.exceptions import ( AppManagerException, FormNotFoundException, ) from corehq.apps.app_manager.models import Form, ModuleNotFoundException from corehq.apps.app_manager.templatetags.xforms_extras import translate from corehq.apps.app_manager.util import ( app_callout_templates, is_linked_app, is_usercase_in_use, ) from corehq.apps.app_manager.views.apps import get_apps_base_context from corehq.apps.app_manager.views.forms import FormHasSubmissionsView from corehq.apps.app_manager.views.notifications import ( get_facility_for_form, notify_form_opened, ) from corehq.apps.app_manager.views.utils import ( back_to_main, bail, form_has_submissions, set_lang_cookie, ) from corehq.apps.cloudcare.utils import should_show_preview_app from corehq.apps.domain.decorators import track_domain_request from corehq.apps.fixtures.fixturegenerators import item_lists_by_domain from corehq.apps.hqwebapp.templatetags.hq_shared_tags import cachebuster from corehq.util.context_processors import websockets_override logger = logging.getLogger(__name__) @require_can_edit_apps @track_domain_request(calculated_prop='cp_n_form_builder_entered') def form_source(request, domain, app_id, form_unique_id): app = get_app(domain, app_id) try: form = app.get_form(form_unique_id) except FormNotFoundException: return bail(request, domain, app_id, not_found="form") try: module = form.get_module() except AttributeError: return bail(request, domain, app_id, not_found="module") return _get_form_designer_view(request, domain, app, module, form) @require_can_edit_apps def form_source_legacy(request, domain, app_id, module_id=None, form_id=None): """ This view has been kept around to not break any documentation on example apps and partner-distributed documentation on existing apps. PLEASE DO NOT DELETE. """ app = get_app(domain, app_id) try: module = app.get_module(module_id) except ModuleNotFoundException: return bail(request, domain, app_id, not_found="module") try: form = module.get_form(form_id) except IndexError: return bail(request, domain, app_id, not_found="form") return _get_form_designer_view(request, domain, app, module, form) def _get_form_designer_view(request, domain, app, module, form): if app and app.copy_of: messages.warning(request, _( "You tried to edit a form that was from a previous release, so " "we have directed you to the latest version of your application." 
)) return back_to_main(request, domain, app_id=app.id) if form.no_vellum: messages.warning(request, _( "You tried to edit this form in the Form Builder. " "However, your administrator has locked this form against editing " "in the form builder, so we have redirected you to " "the form's front page instead." )) return back_to_main(request, domain, app_id=app.id, form_unique_id=form.unique_id) if is_linked_app(app): messages.warning(request, _( "You tried to edit this form in the Form Builder. " "However, this is a linked application and you can only make changes to the " "upstream version." )) return back_to_main(request, domain, app_id=app.id) send_hubspot_form(HUBSPOT_FORM_BUILDER_FORM_ID, request) def _form_too_large(_app, _form): # form less than 0.1MB, anything larger starts to have # performance issues with fullstory return _app.blobs['{}.xml'.format(_form.unique_id)]['content_length'] > 102400 context = get_apps_base_context(request, domain, app) context.update(locals()) vellum_options = _get_base_vellum_options(request, domain, app, context['lang']) vellum_options['core'] = _get_vellum_core_context(request, domain, app, module, form, context['lang']) vellum_options['plugins'] = _get_vellum_plugins(domain, form, module) vellum_options['features'] = _get_vellum_features(request, domain, app) context['vellum_options'] = vellum_options context.update({ 'vellum_debug': settings.VELLUM_DEBUG, 'nav_form': form, 'formdesigner': True, 'include_fullstory': not _form_too_large(app, form), 'CKEDITOR_BASEPATH': "app_manager/js/vellum/lib/ckeditor/", 'show_live_preview': should_show_preview_app( request, app, request.couch_user.username, ), 'show_ui_notification_to_hide_translations': (len(app.langs) > 2), }) context.update(_get_requirejs_context()) if request.user.is_superuser: context.update({'notification_options': _get_notification_options(request, domain, app, form)}) notify_form_opened(domain, request.couch_user, app.id, form.unique_id) response = render(request, "app_manager/form_designer.html", context) set_lang_cookie(response, context['lang']) return response @require_GET @require_can_edit_apps def get_form_data_schema(request, domain, form_unique_id): """Get data schema One of `app_id` or `form_unique_id` is required. `app_id` is ignored if `form_unique_id` is provided. :returns: A list of data source schema definitions. A data source schema definition is a dictionary. For details on the content of the dictionary, see https://github.com/dimagi/Vellum/blob/master/src/datasources.js """ data = [] try: form, app = Form.get_form(form_unique_id, and_app=True) except ResourceConflict:
raise Http404() if app.domain != domain: raise Http404() try: data.append(get_session_schema(form)) if form.requires_case() or is_usercase_in_use(domain): data.append(get_casedb_schema(form)) except AppManagerException as e: notify_exception(request, message=str(e)) return HttpResponseBadRequest( str(e) or _("There is an error in the case management of your application. " "Please fix the error to see case properties in this tree") ) except Exception as e: notify_exception(request, message=str(e)) return HttpResponseBadRequest("schema error, see log for details") data.extend( sorted(item_lists_by_domain(domain), key=lambda x: x['name'].lower()) ) kw = {} if "pretty" in request.GET: kw["indent"] = 2 return HttpResponse(json.dumps(data, **kw)) @require_GET def ping(request): return HttpResponse("pong") def _get_base_vellum_options(request, domain, app, displayLang): """ Returns the base set of options that will be passed into Vellum when it is initialized. :param displayLang: --> derived from the base context """ return { 'intents': { 'templates': next(app_callout_templates), }, 'javaRosa': { 'langs': app.langs, 'displayLanguage': displayLang, 'showOnlyCurrentLang': (app.smart_lang_display and (len(app.langs) > 2)), }, 'uploader': { 'uploadUrls': { 'image': reverse("hqmedia_uploader_image", args=[domain, app.id]), 'audio': reverse("hqmedia_uploader_audio", args=[domain, app.id]), 'video': reverse("hqmedia_uploader_video", args=[domain, app.id]), 'text': reverse("hqmedia_uploader_text", args=[domain, app.id]), }, 'objectMap': app.get_object_map(), 'sessionid': request.COOKIES.get('sessionid'), }, } def _get_vellum_core_context(request, domain, app, module, form, lang): """ Returns the core context that will be passed into vellum when it is initialized. """ core = { 'dataSourcesEndpoint': reverse('get_form_data_schema', kwargs={'domain': domain, 'form_unique_id': form.get_unique_id()}), 'form': form.source, 'formId': form.get_unique_id(), 'formName': translate(form.name, app.langs[0], app.langs), 'saveType': 'patch', 'saveUrl': reverse('edit_form_attr', args=[domain, app.id, form.get_unique_id(), 'xform']), 'patchUrl': reverse('patch_xform', args=[domain, app.id, form.get_unique_id()]), 'hasSubmissions': form_has_submissions(domain, app.id, form.get_unique_id()), 'hasSubmissionsUrl': reverse(FormHasSubmissionsView.urlname, args=[domain, app.id, form.get_unique_id()]), 'allowedDataNodeReferences': [ "meta/deviceID", "meta/instanceID", "meta/username", "meta/userID", "meta/timeStart", "meta/timeEnd", "meta/location", ] + _get_core_context_scheduler_data_nodes(module, form), 'activityUrl': reverse('ping'), 'sessionid': request.COOKIES.get('sessionid'), 'externalLinks': { 'changeSubscription': reverse("domain_subscription_view", kwargs={'domain': domain}), }, 'invalidCaseProperties': ['name'], } core.update(_get_core_context_help_text_context(form)) return core def _get_vellum_plugins(domain, form, module): """ Returns a list of enabled vellum plugins based on the domain's privileges. 
""" vellum_plugins = ["modeliteration", "itemset", "atwho"] if (toggles.COMMTRACK.enabled(domain) or toggles.NON_COMMTRACK_LEDGERS.enabled(domain)): vellum_plugins.append("commtrack") if toggles.VELLUM_SAVE_TO_CASE.enabled(domain): vellum_plugins.append("saveToCase") form_uses_case = ( (module and module.case_type and form.requires_case()) or is_usercase_in_use(domain) ) form_is_basic = form.doc_type == 'Form' if form_uses_case and form_is_basic: vellum_plugins.append("databrowser") return vellum_plugins def _get_vellum_features(request, domain, app): """ Returns the context of features passed into vellum when it is initialized. """ vellum_features = toggles.toggles_dict(username=request.user.username, domain=domain) vellum_features.update({ 'group_in_field_list': app.enable_group_in_field_list, 'image_resize': app.enable_image_resize, 'markdown_in_groups': app.enable_markdown_in_groups, 'lookup_tables': domain_has_privilege(domain, privileges.LOOKUP_TABLES), 'templated_intents': domain_has_privilege(domain, privileges.TEMPLATED_INTENTS), 'custom_intents': domain_has_privilege(domain, privileges.CUSTOM_INTENTS), 'rich_text': True, 'sorted_itemsets': app.enable_sorted_itemsets, 'advanced_itemsets': add_ons.show("advanced_itemsets", request, app), }) return vellum_features def _get_core_context_help_text_context(form): """ Part of the vellum core context. Returns the appropriate icon context for the form type and the knockout template ID context for the correct help text information when opening a blank form with this type. """ if form.get_action_type() == 'open': default_help_text_template_id = '#fd-hq-helptext-registration' form_icon_class = 'fcc fcc-app-createform' elif form.get_action_type() == 'close': default_help_text_template_id = '#fd-hq-helptext-close' form_icon_class = 'fcc fcc-app-completeform' elif form.get_action_type() == 'update': default_help_text_template_id = '#fd-hq-helptext-followup' form_icon_class = 'fcc fcc-app-updateform' else: default_help_text_template_id = '#fd-hq-helptext-survey' form_icon_class = 'fa fa-file-o' return { 'defaultHelpTextTemplateId': default_help_text_template_id, 'formIconClass': form_icon_class, } def _get_core_context_scheduler_data_nodes(module, form): """ Part of the vellum core context. Returns a list of enabled scheduler data nodes. 
""" has_schedule = ( getattr(module, 'has_schedule', False) and getattr(form, 'schedule', False) and form.schedule.enabled ) scheduler_data_nodes = [] if has_schedule: scheduler_data_nodes = [ SCHEDULE_CURRENT_VISIT_NUMBER, SCHEDULE_NEXT_DUE, SCHEDULE_UNSCHEDULED_VISIT, SCHEDULE_GLOBAL_NEXT_VISIT_DATE, ] scheduler_data_nodes.extend([ "next_{}".format(f.schedule_form_id) for f in form.get_phase().get_forms() if getattr(f, 'schedule', False) and f.schedule.enabled ]) return scheduler_data_nodes def _get_notification_options(request, domain, app, form): notification_options = websockets_override(request) if notification_options['WS4REDIS_HEARTBEAT'] in ['null', 'undefined']: notification_options['WS4REDIS_HEARTBEAT'] = None notification_options.update({ 'notify_facility': get_facility_for_form(domain, app.id, form.unique_id), 'user_id': request.couch_user.get_id, }) return notification_options def _get_requirejs_context(): requirejs = { 'requirejs_args': 'version={}{}'.format( cachebuster("app_manager/js/vellum/src/main-components.js"), cachebuster("app_manager/js/vellum/src/local-deps.js") ), } if not settings.VELLUM_DEBUG: requirejs_url = "app_manager/js/vellum/src" elif settings.VELLUM_DEBUG == "dev-min": requirejs_url = "formdesigner/_build/src" else: requirejs_url = "formdesigner/src" requirejs['requirejs_url'] = requirejs_url return requirejs
index.ts
/*
 * Copyright (c) Microsoft Corporation.
 * Licensed under the MIT License.
 *
 * Code generated by Microsoft (R) AutoRest Code Generator.
 * Changes may cause incorrect behavior and will be lost if the code is regenerated.
 */

import * as coreHttp from "@azure/core-http";

/** Storage Service Properties. */
export interface QueueServiceProperties {
  /** Azure Analytics Logging settings */
  queueAnalyticsLogging?: Logging;
  /** A summary of request statistics grouped by API in hourly aggregates for queues */
  hourMetrics?: Metrics;
  /** A summary of request statistics grouped by API in minute aggregates for queues */
  minuteMetrics?: Metrics;
  /** The set of CORS rules. */
  cors?: CorsRule[];
}

/** Azure Analytics Logging settings. */
export interface Logging {
  /** The version of Storage Analytics to configure. */
  version: string;
  /** Indicates whether all delete requests should be logged. */
  deleteProperty: boolean;
  /** Indicates whether all read requests should be logged. */
  read: boolean;
  /** Indicates whether all write requests should be logged. */
  write: boolean;
  /** the retention policy */
  retentionPolicy: RetentionPolicy;
}

/** the retention policy */
export interface RetentionPolicy {
  /** Indicates whether a retention policy is enabled for the storage service */
  enabled: boolean;
  /** Indicates the number of days that metrics or logging or soft-deleted data should be retained. All data older than this value will be deleted */
  days?: number;
}

/** An interface representing Metrics. */
export interface Metrics {
  /** The version of Storage Analytics to configure. */
  version?: string;
  /** Indicates whether metrics are enabled for the Queue service. */
  enabled: boolean;
  /** Indicates whether metrics should generate summary statistics for called API operations. */
  includeAPIs?: boolean;
  /** the retention policy */
  retentionPolicy?: RetentionPolicy;
}

/** CORS is an HTTP feature that enables a web application running under one domain to access resources in another domain. Web browsers implement a security restriction known as same-origin policy that prevents a web page from calling APIs in a different domain; CORS provides a secure way to allow one domain (the origin domain) to call APIs in another domain */
export interface CorsRule {
  /** The origin domains that are permitted to make a request against the storage service via CORS. The origin domain is the domain from which the request originates. Note that the origin must be an exact case-sensitive match with the origin that the user agent sends to the service. You can also use the wildcard character '*' to allow all origin domains to make requests via CORS. */
  allowedOrigins: string;
  /** The methods (HTTP request verbs) that the origin domain may use for a CORS request. (comma separated) */
  allowedMethods: string;
  /** The request headers that the origin domain may specify on the CORS request. */
  allowedHeaders: string;
  /** The response headers that may be sent in the response to the CORS request and exposed by the browser to the request issuer */
  exposedHeaders: string;
  /** The maximum amount of time that a browser should cache the preflight OPTIONS request. */
  maxAgeInSeconds: number;
}

export interface StorageError {
  message?: string;
  code?: string;
}

/** Stats for the storage service.
*/ export interface QueueServiceStatistics { /** Geo-Replication information for the Secondary Storage Service */ geoReplication?: GeoReplication; } /** Geo-Replication information for the Secondary Storage Service */ export interface GeoReplication { /** The status of the secondary location */ status: GeoReplicationStatusType; /** A GMT date/time value, to the second. All primary writes preceding this value are guaranteed to be available for read operations at the secondary. Primary writes after this point in time may or may not be available for reads. */ lastSyncOn: Date; } /** The object returned when calling List Queues on a Queue Service. */ export interface ListQueuesSegmentResponse { serviceEndpoint: string; prefix: string; marker?: string; maxPageSize: number; queueItems?: QueueItem[]; continuationToken: string; } /** An Azure Storage Queue. */ export interface QueueItem { /** The name of the Queue. */ name: string; /** Dictionary of <string> */ metadata?: { [propertyName: string]: string }; } /** signed identifier */ export interface SignedIdentifier { /** a unique id */ id: string; /** The access policy */ accessPolicy: AccessPolicy; } /** An Access policy */ export interface AccessPolicy { /** the date-time the policy is active */ startsOn?: string; /** the date-time the policy expires */ expiresOn?: string; /** the permissions for the acl policy */ permissions?: string; } /** The object returned in the QueueMessageList array when calling Get Messages on a Queue. */ export interface DequeuedMessageItem { /** The Id of the Message. */ messageId: string; /** The time the Message was inserted into the Queue. */ insertedOn: Date; /** The time that the Message will expire and be automatically deleted. */ expiresOn: Date; /** This value is required to delete the Message. If deletion fails using this popreceipt then the message has been dequeued by another client. */ popReceipt: string; /** The time that the message will again become visible in the Queue. */ nextVisibleOn: Date; /** The number of times the message has been dequeued. */ dequeueCount: number; /** The content of the Message. */ messageText: string; } /** A Message object which can be stored in a Queue */ export interface QueueMessage { /** The content of the message */ messageText: string; } /** The object returned in the QueueMessageList array when calling Put Message on a Queue */ export interface EnqueuedMessage { /** The Id of the Message. */ messageId: string; /** The time the Message was inserted into the Queue. */ insertedOn: Date; /** The time that the Message will expire and be automatically deleted. */ expiresOn: Date; /** This value is required to delete the Message. If deletion fails using this popreceipt then the message has been dequeued by another client. */ popReceipt: string; /** The time that the message will again become visible in the Queue. */ nextVisibleOn: Date; } /** The object returned in the QueueMessageList array when calling Peek Messages on a Queue */ export interface PeekedMessageItem { /** The Id of the Message. */ messageId: string; /** The time the Message was inserted into the Queue. */ insertedOn: Date; /** The time that the Message will expire and be automatically deleted. */ expiresOn: Date; /** The number of times the message has been dequeued. */ dequeueCount: number; /** The content of the Message. */ messageText: string; } /** Defines headers for Service_setProperties operation. 
*/ export interface ServiceSetPropertiesHeaders { /** This header uniquely identifies the request that was made and can be used for troubleshooting the request. */ requestId?: string; /** Indicates the version of the Queue service used to execute the request. This header is returned for requests made against version 2009-09-19 and above. */ version?: string; /** If a client request id header is sent in the request, this header will be present in the response with the same value. */ clientRequestId?: string; /** Error Code */ errorCode?: string; } /** Defines headers for Service_setProperties operation. */ export interface ServiceSetPropertiesExceptionHeaders { errorCode?: string; /** If a client request id header is sent in the request, this header will be present in the response with the same value. */ clientRequestId?: string; } /** Defines headers for Service_getProperties operation. */ export interface ServiceGetPropertiesHeaders { /** This header uniquely identifies the request that was made and can be used for troubleshooting the request. */ requestId?: string; /** Indicates the version of the Queue service used to execute the request. This header is returned for requests made against version 2009-09-19 and above. */ version?: string; /** If a client request id header is sent in the request, this header will be present in the response with the same value. */ clientRequestId?: string; /** Error Code */ errorCode?: string; } /** Defines headers for Service_getProperties operation. */ export interface ServiceGetPropertiesExceptionHeaders { errorCode?: string; /** If a client request id header is sent in the request, this header will be present in the response with the same value. */ clientRequestId?: string; } /** Defines headers for Service_getStatistics operation. */ export interface ServiceGetStatisticsHeaders { /** This header uniquely identifies the request that was made and can be used for troubleshooting the request. */ requestId?: string; /** Indicates the version of the Queue service used to execute the request. This header is returned for requests made against version 2009-09-19 and above. */ version?: string; /** UTC date/time value generated by the service that indicates the time at which the response was initiated */ date?: Date; /** If a client request id header is sent in the request, this header will be present in the response with the same value. */ clientRequestId?: string; /** Error Code */ errorCode?: string; } /** Defines headers for Service_getStatistics operation. */ export interface ServiceGetStatisticsExceptionHeaders { errorCode?: string; /** If a client request id header is sent in the request, this header will be present in the response with the same value. */ clientRequestId?: string; } /** Defines headers for Service_listQueuesSegment operation. */ export interface ServiceListQueuesSegmentHeaders { /** This header uniquely identifies the request that was made and can be used for troubleshooting the request. */ requestId?: string; /** Indicates the version of the Queue service used to execute the request. This header is returned for requests made against version 2009-09-19 and above. */ version?: string; /** UTC date/time value generated by the service that indicates the time at which the response was initiated */ date?: Date; /** If a client request id header is sent in the request, this header will be present in the response with the same value. 
*/ clientRequestId?: string; /** Error Code */ errorCode?: string; } /** Defines headers for Service_listQueuesSegment operation. */ export interface ServiceListQueuesSegmentExceptionHeaders { errorCode?: string; /** If a client request id header is sent in the request, this header will be present in the response with the same value. */ clientRequestId?: string; } /** Defines headers for Queue_create operation. */ export interface QueueCreateHeaders { /** This header uniquely identifies the request that was made and can be used for troubleshooting the request. */ requestId?: string; /** Indicates the version of the Queue service used to execute the request. This header is returned for requests made against version 2009-09-19 and above. */ version?: string; /** UTC date/time value generated by the service that indicates the time at which the response was initiated */ date?: Date; /** If a client request id header is sent in the request, this header will be present in the response with the same value. */ clientRequestId?: string; /** Error Code */ errorCode?: string; } /** Defines headers for Queue_create operation. */ export interface QueueCreateExceptionHeaders { errorCode?: string; /** If a client request id header is sent in the request, this header will be present in the response with the same value. */ clientRequestId?: string; } /** Defines headers for Queue_delete operation. */ export interface QueueDeleteHeaders { /** This header uniquely identifies the request that was made and can be used for troubleshooting the request. */ requestId?: string; /** Indicates the version of the Queue service used to execute the request. This header is returned for requests made against version 2009-09-19 and above. */ version?: string; /** UTC date/time value generated by the service that indicates the time at which the response was initiated */ date?: Date; /** If a client request id header is sent in the request, this header will be present in the response with the same value. */ clientRequestId?: string; /** Error Code */ errorCode?: string; } /** Defines headers for Queue_delete operation. */ export interface QueueDeleteExceptionHeaders { errorCode?: string; /** If a client request id header is sent in the request, this header will be present in the response with the same value. */ clientRequestId?: string; } /** Defines headers for Queue_getProperties operation. */ export interface QueueGetPropertiesHeaders { metadata?: { [propertyName: string]: string }; /** The approximate number of messages in the queue. This number is not lower than the actual number of messages in the queue, but could be higher. */ approximateMessagesCount?: number; /** This header uniquely identifies the request that was made and can be used for troubleshooting the request. */ requestId?: string; /** Indicates the version of the Queue service used to execute the request. This header is returned for requests made against version 2009-09-19 and above. */ version?: string; /** UTC date/time value generated by the service that indicates the time at which the response was initiated */ date?: Date; /** If a client request id header is sent in the request, this header will be present in the response with the same value. */ clientRequestId?: string; /** Error Code */ errorCode?: string; } /** Defines headers for Queue_getProperties operation. 
*/ export interface QueueGetPropertiesExceptionHeaders { errorCode?: string; /** If a client request id header is sent in the request, this header will be present in the response with the same value. */ clientRequestId?: string; } /** Defines headers for Queue_setMetadata operation. */ export interface QueueSetMetadataHeaders { /** This header uniquely identifies the request that was made and can be used for troubleshooting the request. */ requestId?: string; /** Indicates the version of the Queue service used to execute the request. This header is returned for requests made against version 2009-09-19 and above. */ version?: string; /** UTC date/time value generated by the service that indicates the time at which the response was initiated */ date?: Date; /** If a client request id header is sent in the request, this header will be present in the response with the same value. */ clientRequestId?: string; /** Error Code */ errorCode?: string; } /** Defines headers for Queue_setMetadata operation. */ export interface QueueSetMetadataExceptionHeaders { errorCode?: string; /** If a client request id header is sent in the request, this header will be present in the response with the same value. */ clientRequestId?: string; } /** Defines headers for Queue_getAccessPolicy operation. */ export interface QueueGetAccessPolicyHeaders { /** This header uniquely identifies the request that was made and can be used for troubleshooting the request. */ requestId?: string; /** Indicates the version of the Queue service used to execute the request. This header is returned for requests made against version 2009-09-19 and above. */ version?: string; /** UTC date/time value generated by the service that indicates the time at which the response was initiated */ date?: Date; /** If a client request id header is sent in the request, this header will be present in the response with the same value. */ clientRequestId?: string; /** Error Code */ errorCode?: string; } /** Defines headers for Queue_getAccessPolicy operation. */ export interface QueueGetAccessPolicyExceptionHeaders { errorCode?: string; /** If a client request id header is sent in the request, this header will be present in the response with the same value. */ clientRequestId?: string; } /** Defines headers for Queue_setAccessPolicy operation. */ export interface QueueSetAccessPolicyHeaders { /** This header uniquely identifies the request that was made and can be used for troubleshooting the request. */ requestId?: string; /** Indicates the version of the Queue service used to execute the request. This header is returned for requests made against version 2009-09-19 and above. */ version?: string; /** UTC date/time value generated by the service that indicates the time at which the response was initiated */ date?: Date; /** If a client request id header is sent in the request, this header will be present in the response with the same value. */ clientRequestId?: string; /** Error Code */ errorCode?: string; } /** Defines headers for Queue_setAccessPolicy operation. */ export interface QueueSetAccessPolicyExceptionHeaders { errorCode?: string; /** If a client request id header is sent in the request, this header will be present in the response with the same value. */ clientRequestId?: string; } /** Defines headers for Messages_dequeue operation. */ export interface MessagesDequeueHeaders { /** This header uniquely identifies the request that was made and can be used for troubleshooting the request. 
*/ requestId?: string; /** Indicates the version of the Queue service used to execute the request. This header is returned for requests made against version 2009-09-19 and above. */ version?: string; /** UTC date/time value generated by the service that indicates the time at which the response was initiated */ date?: Date; /** If a client request id header is sent in the request, this header will be present in the response with the same value. */ clientRequestId?: string; /** Error Code */ errorCode?: string; } /** Defines headers for Messages_dequeue operation. */ export interface MessagesDequeueExceptionHeaders { errorCode?: string; /** If a client request id header is sent in the request, this header will be present in the response with the same value. */ clientRequestId?: string; } /** Defines headers for Messages_clear operation. */ export interface MessagesClearHeaders { /** This header uniquely identifies the request that was made and can be used for troubleshooting the request. */ requestId?: string; /** Indicates the version of the Queue service used to execute the request. This header is returned for requests made against version 2009-09-19 and above. */ version?: string; /** UTC date/time value generated by the service that indicates the time at which the response was initiated */ date?: Date; /** If a client request id header is sent in the request, this header will be present in the response with the same value. */ clientRequestId?: string; /** Error Code */ errorCode?: string; } /** Defines headers for Messages_clear operation. */ export interface MessagesClearExceptionHeaders { errorCode?: string; /** If a client request id header is sent in the request, this header will be present in the response with the same value. */ clientRequestId?: string; } /** Defines headers for Messages_enqueue operation. */ export interface MessagesEnqueueHeaders { /** This header uniquely identifies the request that was made and can be used for troubleshooting the request. */ requestId?: string; /** Indicates the version of the Queue service used to execute the request. This header is returned for requests made against version 2009-09-19 and above. */ version?: string; /** UTC date/time value generated by the service that indicates the time at which the response was initiated */ date?: Date; /** If a client request id header is sent in the request, this header will be present in the response with the same value. */ clientRequestId?: string; /** Error Code */ errorCode?: string; } /** Defines headers for Messages_enqueue operation. */ export interface MessagesEnqueueExceptionHeaders { errorCode?: string; /** If a client request id header is sent in the request, this header will be present in the response with the same value. */ clientRequestId?: string; } /** Defines headers for Messages_peek operation. */ export interface MessagesPeekHeaders { /** This header uniquely identifies the request that was made and can be used for troubleshooting the request. */ requestId?: string; /** Indicates the version of the Queue service used to execute the request. This header is returned for requests made against version 2009-09-19 and above. */ version?: string; /** UTC date/time value generated by the service that indicates the time at which the response was initiated */ date?: Date; /** If a client request id header is sent in the request, this header will be present in the response with the same value. 
*/ clientRequestId?: string; /** Error Code */ errorCode?: string; } /** Defines headers for Messages_peek operation. */ export interface MessagesPeekExceptionHeaders { errorCode?: string; /** If a client request id header is sent in the request, this header will be present in the response with the same value. */ clientRequestId?: string; } /** Defines headers for MessageId_update operation. */ export interface MessageIdUpdateHeaders { /** This header uniquely identifies the request that was made and can be used for troubleshooting the request. */ requestId?: string; /** Indicates the version of the Queue service used to execute the request. This header is returned for requests made against version 2009-09-19 and above. */ version?: string; /** UTC date/time value generated by the service that indicates the time at which the response was initiated */ date?: Date; /** The pop receipt of the queue message. */ popReceipt?: string; /** A UTC date/time value that represents when the message will be visible on the queue. */ nextVisibleOn?: Date; /** If a client request id header is sent in the request, this header will be present in the response with the same value. */ clientRequestId?: string; /** Error Code */ errorCode?: string; } /** Defines headers for MessageId_update operation. */ export interface MessageIdUpdateExceptionHeaders { errorCode?: string; /** If a client request id header is sent in the request, this header will be present in the response with the same value. */ clientRequestId?: string; } /** Defines headers for MessageId_delete operation. */ export interface MessageIdDeleteHeaders { /** This header uniquely identifies the request that was made and can be used for troubleshooting the request. */ requestId?: string; /** Indicates the version of the Queue service used to execute the request. This header is returned for requests made against version 2009-09-19 and above. */ version?: string; /** UTC date/time value generated by the service that indicates the time at which the response was initiated */ date?: Date; /** If a client request id header is sent in the request, this header will be present in the response with the same value. */ clientRequestId?: string; /** Error Code */ errorCode?: string; } /** Defines headers for MessageId_delete operation. */ export interface MessageIdDeleteExceptionHeaders { errorCode?: string; /** If a client request id header is sent in the request, this header will be present in the response with the same value. */ clientRequestId?: string; } /** Known values of {@link StorageErrorCode} that the service accepts. 
*/ export const enum KnownStorageErrorCode { AccountAlreadyExists = "AccountAlreadyExists", AccountBeingCreated = "AccountBeingCreated", AccountIsDisabled = "AccountIsDisabled", AuthenticationFailed = "AuthenticationFailed", AuthorizationFailure = "AuthorizationFailure", ConditionHeadersNotSupported = "ConditionHeadersNotSupported", ConditionNotMet = "ConditionNotMet", EmptyMetadataKey = "EmptyMetadataKey", InsufficientAccountPermissions = "InsufficientAccountPermissions", InternalError = "InternalError", InvalidAuthenticationInfo = "InvalidAuthenticationInfo", InvalidHeaderValue = "InvalidHeaderValue", InvalidHttpVerb = "InvalidHttpVerb", InvalidInput = "InvalidInput", InvalidMd5 = "InvalidMd5", InvalidMetadata = "InvalidMetadata", InvalidQueryParameterValue = "InvalidQueryParameterValue", InvalidRange = "InvalidRange", InvalidResourceName = "InvalidResourceName", InvalidUri = "InvalidUri", InvalidXmlDocument = "InvalidXmlDocument", InvalidXmlNodeValue = "InvalidXmlNodeValue", Md5Mismatch = "Md5Mismatch", MetadataTooLarge = "MetadataTooLarge", MissingContentLengthHeader = "MissingContentLengthHeader", MissingRequiredQueryParameter = "MissingRequiredQueryParameter", MissingRequiredHeader = "MissingRequiredHeader", MissingRequiredXmlNode = "MissingRequiredXmlNode", MultipleConditionHeadersNotSupported = "MultipleConditionHeadersNotSupported", OperationTimedOut = "OperationTimedOut", OutOfRangeInput = "OutOfRangeInput", OutOfRangeQueryParameterValue = "OutOfRangeQueryParameterValue", RequestBodyTooLarge = "RequestBodyTooLarge", ResourceTypeMismatch = "ResourceTypeMismatch", RequestUrlFailedToParse = "RequestUrlFailedToParse", ResourceAlreadyExists = "ResourceAlreadyExists", ResourceNotFound = "ResourceNotFound", ServerBusy = "ServerBusy", UnsupportedHeader = "UnsupportedHeader", UnsupportedXmlNode = "UnsupportedXmlNode", UnsupportedQueryParameter = "UnsupportedQueryParameter", UnsupportedHttpVerb = "UnsupportedHttpVerb", InvalidMarker = "InvalidMarker", MessageNotFound = "MessageNotFound", MessageTooLarge = "MessageTooLarge", PopReceiptMismatch = "PopReceiptMismatch", QueueAlreadyExists = "QueueAlreadyExists", QueueBeingDeleted = "QueueBeingDeleted", QueueDisabled = "QueueDisabled", QueueNotEmpty = "QueueNotEmpty", QueueNotFound = "QueueNotFound", AuthorizationSourceIPMismatch = "AuthorizationSourceIPMismatch", AuthorizationProtocolMismatch = "AuthorizationProtocolMismatch", AuthorizationPermissionMismatch = "AuthorizationPermissionMismatch", AuthorizationServiceMismatch = "AuthorizationServiceMismatch", AuthorizationResourceTypeMismatch = "AuthorizationResourceTypeMismatch", FeatureVersionMismatch = "FeatureVersionMismatch" } /** * Defines values for StorageErrorCode. \ * {@link KnownStorageErrorCode} can be used interchangeably with StorageErrorCode, * this enum contains the known values that the service supports. 
 * ### Known values supported by the service
 * **AccountAlreadyExists** \
 * **AccountBeingCreated** \
 * **AccountIsDisabled** \
 * **AuthenticationFailed** \
 * **AuthorizationFailure** \
 * **ConditionHeadersNotSupported** \
 * **ConditionNotMet** \
 * **EmptyMetadataKey** \
 * **InsufficientAccountPermissions** \
 * **InternalError** \
 * **InvalidAuthenticationInfo** \
 * **InvalidHeaderValue** \
 * **InvalidHttpVerb** \
 * **InvalidInput** \
 * **InvalidMd5** \
 * **InvalidMetadata** \
 * **InvalidQueryParameterValue** \
 * **InvalidRange** \
 * **InvalidResourceName** \
 * **InvalidUri** \
 * **InvalidXmlDocument** \
 * **InvalidXmlNodeValue** \
 * **Md5Mismatch** \
 * **MetadataTooLarge** \
 * **MissingContentLengthHeader** \
 * **MissingRequiredQueryParameter** \
 * **MissingRequiredHeader** \
 * **MissingRequiredXmlNode** \
 * **MultipleConditionHeadersNotSupported** \
 * **OperationTimedOut** \
 * **OutOfRangeInput** \
 * **OutOfRangeQueryParameterValue** \
 * **RequestBodyTooLarge** \
 * **ResourceTypeMismatch** \
 * **RequestUrlFailedToParse** \
 * **ResourceAlreadyExists** \
 * **ResourceNotFound** \
 * **ServerBusy** \
 * **UnsupportedHeader** \
 * **UnsupportedXmlNode** \
 * **UnsupportedQueryParameter** \
 * **UnsupportedHttpVerb** \
 * **InvalidMarker** \
 * **MessageNotFound** \
 * **MessageTooLarge** \
 * **PopReceiptMismatch** \
 * **QueueAlreadyExists** \
 * **QueueBeingDeleted** \
 * **QueueDisabled** \
 * **QueueNotEmpty** \
 * **QueueNotFound** \
 * **AuthorizationSourceIPMismatch** \
 * **AuthorizationProtocolMismatch** \
 * **AuthorizationPermissionMismatch** \
 * **AuthorizationServiceMismatch** \
 * **AuthorizationResourceTypeMismatch** \
 * **FeatureVersionMismatch**
 */
export type StorageErrorCode = string;

/** Defines values for GeoReplicationStatusType. */
export type GeoReplicationStatusType = "live" | "bootstrap" | "unavailable";

/** Optional parameters. */
export interface ServiceSetPropertiesOptionalParams extends coreHttp.OperationOptions {
  /** The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting Timeouts for Queue Service Operations.</a> */
  timeoutInSeconds?: number;
  /** Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. */
  requestId?: string;
}

/** Contains response data for the setProperties operation. */
export type ServiceSetPropertiesResponse = ServiceSetPropertiesHeaders & {
  /** The underlying HTTP response. */
  _response: coreHttp.HttpResponse & {
    /** The parsed HTTP response headers. */
    parsedHeaders: ServiceSetPropertiesHeaders;
  };
};

/** Optional parameters. */
export interface ServiceGetPropertiesOptionalParams extends coreHttp.OperationOptions {
  /** The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting Timeouts for Queue Service Operations.</a> */
  timeoutInSeconds?: number;
  /** Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. */
  requestId?: string;
}

/** Contains response data for the getProperties operation. */
export type ServiceGetPropertiesResponse = ServiceGetPropertiesHeaders &
  QueueServiceProperties & {
    /** The underlying HTTP response.
 */
    _response: coreHttp.HttpResponse & {
      /** The response body as text (string format) */
      bodyAsText: string;
      /** The response body as parsed JSON or XML */
      parsedBody: QueueServiceProperties;
      /** The parsed HTTP response headers. */
      parsedHeaders: ServiceGetPropertiesHeaders;
    };
  };

/** Optional parameters. */
export interface ServiceGetStatisticsOptionalParams extends coreHttp.OperationOptions {
  /** The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting Timeouts for Queue Service Operations.</a> */
  timeoutInSeconds?: number;
  /** Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. */
  requestId?: string;
}

/** Contains response data for the getStatistics operation. */
export type ServiceGetStatisticsResponse = ServiceGetStatisticsHeaders &
  QueueServiceStatistics & {
    /** The underlying HTTP response. */
    _response: coreHttp.HttpResponse & {
      /** The response body as text (string format) */
      bodyAsText: string;
      /** The response body as parsed JSON or XML */
      parsedBody: QueueServiceStatistics;
      /** The parsed HTTP response headers. */
      parsedHeaders: ServiceGetStatisticsHeaders;
    };
  };

/** Optional parameters. */
export interface ServiceListQueuesSegmentOptionalParams extends coreHttp.OperationOptions {
  /** The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting Timeouts for Queue Service Operations.</a> */
  timeoutInSeconds?: number;
  /** Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. */
  requestId?: string;
  /** Filters the results to return only queues whose name begins with the specified prefix. */
  prefix?: string;
  /** A string value that identifies the portion of the list of queues to be returned with the next listing operation. The operation returns the ContinuationToken value within the response body if the listing operation did not return all queues remaining to be listed with the current page. The NextMarker value can be used as the value for the marker parameter in a subsequent call to request the next page of list items. The marker value is opaque to the client. */
  marker?: string;
  /** Specifies the maximum number of queues to return. If the request does not specify maxresults, or specifies a value greater than 5000, the server will return up to 5000 items. Note that if the listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remainder of the results. For this reason, it is possible that the service will return fewer results than specified by maxresults, or than the default of 5000. */
  maxPageSize?: number;
  /** Include this parameter to specify that the queues' metadata be returned as part of the response body. */
  include?: string[];
}

/** Contains response data for the listQueuesSegment operation. */
export type ServiceListQueuesSegmentResponse = ServiceListQueuesSegmentHeaders &
  ListQueuesSegmentResponse & {
    /** The underlying HTTP response.
 */
    _response: coreHttp.HttpResponse & {
      /** The response body as text (string format) */
      bodyAsText: string;
      /** The response body as parsed JSON or XML */
      parsedBody: ListQueuesSegmentResponse;
      /** The parsed HTTP response headers. */
      parsedHeaders: ServiceListQueuesSegmentHeaders;
    };
  };

/** Optional parameters. */
export interface QueueCreateOptionalParams extends coreHttp.OperationOptions {
  /** The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting Timeouts for Queue Service Operations.</a> */
  timeoutInSeconds?: number;
  /** Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. */
  requestId?: string;
  /** Optional. Include this parameter to specify that the queue's metadata be returned as part of the response body. Note that metadata requested with this parameter must be stored in accordance with the naming restrictions imposed by the 2009-09-19 version of the Queue service. Beginning with this version, all metadata names must adhere to the naming conventions for C# identifiers. */
  metadata?: { [propertyName: string]: string };
}

/** Contains response data for the create operation. */
export type QueueCreateResponse = QueueCreateHeaders & {
  /** The underlying HTTP response. */
  _response: coreHttp.HttpResponse & {
    /** The parsed HTTP response headers. */
    parsedHeaders: QueueCreateHeaders;
  };
};

/** Optional parameters. */
export interface QueueDeleteOptionalParams extends coreHttp.OperationOptions {
  /** The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting Timeouts for Queue Service Operations.</a> */
  timeoutInSeconds?: number;
  /** Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. */
  requestId?: string;
}

/** Contains response data for the delete operation. */
export type QueueDeleteResponse = QueueDeleteHeaders & {
  /** The underlying HTTP response. */
  _response: coreHttp.HttpResponse & {
    /** The parsed HTTP response headers. */
    parsedHeaders: QueueDeleteHeaders;
  };
};

/** Optional parameters. */
export interface QueueGetPropertiesOptionalParams extends coreHttp.OperationOptions {
  /** The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting Timeouts for Queue Service Operations.</a> */
  timeoutInSeconds?: number;
  /** Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. */
  requestId?: string;
}

/** Contains response data for the getProperties operation. */
export type QueueGetPropertiesResponse = QueueGetPropertiesHeaders & {
  /** The underlying HTTP response. */
  _response: coreHttp.HttpResponse & {
    /** The parsed HTTP response headers. */
    parsedHeaders: QueueGetPropertiesHeaders;
  };
};

/** Optional parameters. */
export interface QueueSetMetadataOptionalParams extends coreHttp.OperationOptions {
  /** The timeout parameter is expressed in seconds.
For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting Timeouts for Queue Service Operations.</a> */
  timeoutInSeconds?: number;
  /** Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. */
  requestId?: string;
  /** Optional. Include this parameter to specify that the queue's metadata be returned as part of the response body. Note that metadata requested with this parameter must be stored in accordance with the naming restrictions imposed by the 2009-09-19 version of the Queue service. Beginning with this version, all metadata names must adhere to the naming conventions for C# identifiers. */
  metadata?: { [propertyName: string]: string };
}

/** Contains response data for the setMetadata operation. */
export type QueueSetMetadataResponse = QueueSetMetadataHeaders & {
  /** The underlying HTTP response. */
  _response: coreHttp.HttpResponse & {
    /** The parsed HTTP response headers. */
    parsedHeaders: QueueSetMetadataHeaders;
  };
};

/** Optional parameters. */
export interface QueueGetAccessPolicyOptionalParams extends coreHttp.OperationOptions {
  /** The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting Timeouts for Queue Service Operations.</a> */
  timeoutInSeconds?: number;
  /** Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. */
  requestId?: string;
}

/** Contains response data for the getAccessPolicy operation. */
export type QueueGetAccessPolicyResponse = QueueGetAccessPolicyHeaders &
  SignedIdentifier[] & {
    /** The underlying HTTP response. */
    _response: coreHttp.HttpResponse & {
      /** The response body as text (string format) */
      bodyAsText: string;
      /** The response body as parsed JSON or XML */
      parsedBody: SignedIdentifier[];
      /** The parsed HTTP response headers. */
      parsedHeaders: QueueGetAccessPolicyHeaders;
    };
  };

/** Optional parameters. */
export interface QueueSetAccessPolicyOptionalParams extends coreHttp.OperationOptions {
  /** The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting Timeouts for Queue Service Operations.</a> */
  timeoutInSeconds?: number;
  /** Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. */
  requestId?: string;
  /** The ACLs for the queue */
  queueAcl?: SignedIdentifier[];
}

/** Contains response data for the setAccessPolicy operation. */
export type QueueSetAccessPolicyResponse = QueueSetAccessPolicyHeaders & {
  /** The underlying HTTP response. */
  _response: coreHttp.HttpResponse & {
    /** The parsed HTTP response headers. */
    parsedHeaders: QueueSetAccessPolicyHeaders;
  };
};

/** Optional parameters. */
export interface MessagesDequeueOptionalParams extends coreHttp.OperationOptions {
  /** The timeout parameter is expressed in seconds.
For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting Timeouts for Queue Service Operations.</a> */
  timeoutInSeconds?: number;
  /** Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. */
  requestId?: string;
  /** Optional. A nonzero integer value that specifies the number of messages to retrieve from the queue, up to a maximum of 32. If fewer are visible, the visible messages are returned. By default, a single message is retrieved from the queue with this operation. */
  numberOfMessages?: number;
  /** Optional. Specifies the new visibility timeout value, in seconds, relative to server time. The default value is 30 seconds. A specified value must be larger than or equal to 1 second, and cannot be larger than 7 days, or larger than 2 hours on REST protocol versions prior to version 2011-08-18. The visibility timeout of a message can be set to a value later than the expiry time. */
  visibilityTimeout?: number;
}

/** Contains response data for the dequeue operation. */
export type MessagesDequeueResponse = MessagesDequeueHeaders &
  DequeuedMessageItem[] & {
    /** The underlying HTTP response. */
    _response: coreHttp.HttpResponse & {
      /** The response body as text (string format) */
      bodyAsText: string;
      /** The response body as parsed JSON or XML */
      parsedBody: DequeuedMessageItem[];
      /** The parsed HTTP response headers. */
      parsedHeaders: MessagesDequeueHeaders;
    };
  };

/** Optional parameters. */
export interface MessagesClearOptionalParams extends coreHttp.OperationOptions {
  /** The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting Timeouts for Queue Service Operations.</a> */
  timeoutInSeconds?: number;
  /** Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. */
  requestId?: string;
}

/** Contains response data for the clear operation. */
export type MessagesClearResponse = MessagesClearHeaders & {
  /** The underlying HTTP response. */
  _response: coreHttp.HttpResponse & {
    /** The parsed HTTP response headers. */
    parsedHeaders: MessagesClearHeaders;
  };
};

/** Optional parameters. */
export interface MessagesEnqueueOptionalParams extends coreHttp.OperationOptions {
  /** The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting Timeouts for Queue Service Operations.</a> */
  timeoutInSeconds?: number;
  /** Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. */
  requestId?: string;
  /** Optional. If specified, the request must be made using an x-ms-version of 2011-08-18 or later. If not specified, the default value is 0. Specifies the new visibility timeout value, in seconds, relative to server time. The new value must be larger than or equal to 0, and cannot be larger than 7 days. The visibility timeout of a message cannot be set to a value later than the expiry time. visibilitytimeout should be set to a value smaller than the time-to-live value. */
  visibilityTimeout?: number;
  /** Optional. Specifies the time-to-live interval for the message, in seconds.
Prior to version 2017-07-29, the maximum time-to-live allowed is 7 days. For version 2017-07-29 or later, the maximum time-to-live can be any positive number, as well as -1 indicating that the message does not expire. If this parameter is omitted, the default time-to-live is 7 days. */
  messageTimeToLive?: number;
}

/** Contains response data for the enqueue operation. */
export type MessagesEnqueueResponse = MessagesEnqueueHeaders &
  EnqueuedMessage[] & {
    /** The underlying HTTP response. */
    _response: coreHttp.HttpResponse & {
      /** The response body as text (string format) */
      bodyAsText: string;
      /** The response body as parsed JSON or XML */
      parsedBody: EnqueuedMessage[];
      /** The parsed HTTP response headers. */
      parsedHeaders: MessagesEnqueueHeaders;
    };
  };

/** Optional parameters. */
export interface MessagesPeekOptionalParams extends coreHttp.OperationOptions {
  /** The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting Timeouts for Queue Service Operations.</a> */
  timeoutInSeconds?: number;
  /** Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. */
  requestId?: string;
  /** Optional. A nonzero integer value that specifies the number of messages to retrieve from the queue, up to a maximum of 32. If fewer are visible, the visible messages are returned. By default, a single message is retrieved from the queue with this operation. */
  numberOfMessages?: number;
}

/** Contains response data for the peek operation. */
export type MessagesPeekResponse = MessagesPeekHeaders &
  PeekedMessageItem[] & {
    /** The underlying HTTP response. */
    _response: coreHttp.HttpResponse & {
      /** The response body as text (string format) */
      bodyAsText: string;
      /** The response body as parsed JSON or XML */
      parsedBody: PeekedMessageItem[];
      /** The parsed HTTP response headers. */
      parsedHeaders: MessagesPeekHeaders;
    };
  };

/** Optional parameters. */
export interface MessageIdUpdateOptionalParams extends coreHttp.OperationOptions {
  /** The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting Timeouts for Queue Service Operations.</a> */
  timeoutInSeconds?: number;
  /** Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. */
  requestId?: string;
  /** A Message object which can be stored in a Queue */
  queueMessage?: QueueMessage;
}

/** Contains response data for the update operation. */
export type MessageIdUpdateResponse = MessageIdUpdateHeaders & {
  /** The underlying HTTP response. */
  _response: coreHttp.HttpResponse & {
    /** The parsed HTTP response headers. */
    parsedHeaders: MessageIdUpdateHeaders;
  };
};

/** Optional parameters. */
export interface MessageIdDeleteOptionalParams extends coreHttp.OperationOptions {
  /** The timeout parameter is expressed in seconds. For more information, see <a href="https://docs.microsoft.com/en-us/rest/api/storageservices/setting-timeouts-for-queue-service-operations">Setting Timeouts for Queue Service Operations.</a> */
  timeoutInSeconds?: number;
  /** Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage analytics logging is enabled.
*/ requestId?: string; } /** Contains response data for the delete operation. */ export type MessageIdDeleteResponse = MessageIdDeleteHeaders & { /** The underlying HTTP response. */ _response: coreHttp.HttpResponse & { /** The parsed HTTP response headers. */ parsedHeaders: MessageIdDeleteHeaders; }; }; /** Optional parameters. */ export interface StorageClientOptionalParams extends coreHttp.ServiceClientOptions { /** Specifies the version of the operation to use for this request. */ version?: string; /** Overrides client endpoint. */
endpoint?: string; }
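// Editor's note: a small, hypothetical usage sketch (not generated code)
// showing how the model types above compose; every value below is
// illustrative only.
const exampleServiceProperties: QueueServiceProperties = {
  queueAnalyticsLogging: {
    version: "1.0",
    deleteProperty: true,
    read: true,
    write: false,
    retentionPolicy: { enabled: true, days: 7 },
  },
  cors: [
    {
      allowedOrigins: "https://contoso.example",
      allowedMethods: "GET,PUT",
      allowedHeaders: "x-ms-meta-*",
      exposedHeaders: "x-ms-meta-*",
      maxAgeInSeconds: 300,
    },
  ],
};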
bug.rs
// These functions are used by macro expansion for bug! and span_bug! use crate::ty::{tls, TyCtxt}; use rustc_span::{MultiSpan, Span}; use std::fmt; use std::panic::{panic_any, Location}; #[cold] #[inline(never)] #[track_caller] pub fn bug_fmt(args: fmt::Arguments<'_>) -> ! { // this wrapper mostly exists so I don't have to write a fully // qualified path of None::<Span> inside the bug!() macro definition opt_span_bug_fmt(None::<Span>, args, Location::caller()); } #[cold] #[inline(never)] #[track_caller] pub fn span_bug_fmt<S: Into<MultiSpan>>(span: S, args: fmt::Arguments<'_>) -> ! { opt_span_bug_fmt(Some(span), args, Location::caller()); } #[track_caller] fn opt_span_bug_fmt<S: Into<MultiSpan>>( span: Option<S>, args: fmt::Arguments<'_>, location: &Location<'_>, ) -> ! { tls::with_opt(move |tcx| { let msg = format!("{}: {}", location, args); match (tcx, span) { (Some(tcx), Some(span)) => tcx.sess.diagnostic().span_bug(span, &msg), (Some(tcx), None) => tcx.sess.diagnostic().bug(&msg), (None, _) => panic_any(msg), } }); unreachable!(); } /// A query to trigger a `delay_span_bug`. Clearly, if one has a `tcx` one can already trigger a /// `delay_span_bug`, so what is the point of this? It exists to help us test `delay_span_bug`'s /// interactions with the query system and incremental. pub fn trigger_delay_span_bug(tcx: TyCtxt<'_>, key: rustc_hir::def_id::DefId) { tcx.sess.delay_span_bug( tcx.def_span(key), "delayed span bug triggered by #[rustc_error(delay_span_bug_from_inside_query)]", ); } pub fn
(providers: &mut crate::ty::query::Providers) { *providers = crate::ty::query::Providers { trigger_delay_span_bug, ..*providers }; }
provide
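// Editor's note: a self-contained sketch, independent of rustc internals, of
// the #[track_caller] pattern the functions above rely on. Because the
// attribute propagates the caller's location, Location::caller() inside
// `my_bug` reports the call site of `my_bug` itself, which is exactly how
// bug_fmt attaches a useful location to its diagnostic message.
#[track_caller]
fn my_bug(msg: &str) -> ! {
    let location = std::panic::Location::caller();
    panic!("{}: {}", location, msg);
}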
partial.ts
import { Behavior, BehaviorCode } from "behave/behavior"; export class
extends Behavior {
    behaviors: Array<Behavior>;
    selection = 0;

    constructor(...behaviors: Array<Behavior>){
        super();
        this.behaviors = behaviors;
    }

    // Selects among the given behavior components (one evaluation per behave call)
    // Performs an OR-like behavior and will "fail over" to each successive component until Success is reached or Failure is certain
    // -Returns Success if a behavior component returns Success
    // -Returns Running if a behavior component returns Failure or Running
    // -Returns Failure if all behavior components returned Failure or an error has occurred
    behave(): BehaviorCode {
        while(this.selection < this.behaviors.length){
            const result = this.behaviors[this.selection].behave();
            switch(result){
                case BehaviorCode.Failure:
                    this.selection++;
                    this.returnCode = BehaviorCode.Running;
                    return this.returnCode;
                case BehaviorCode.Success:
                    this.selection = 0;
                    this.returnCode = result;
                    return this.returnCode;
                case BehaviorCode.Running:
                    this.returnCode = result;
                    return this.returnCode;
                default:
                    this.selection++;
                    this.returnCode = BehaviorCode.Failure;
                    return this.returnCode;
            }
        }
        this.selection = 0;
        this.returnCode = BehaviorCode.Failure;
        return this.returnCode;
    }
}
PartialSelector
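// Editor's note: a minimal, hypothetical sketch; AlwaysFail and AlwaysSucceed
// are stand-in Behavior subclasses, not part of this module. It illustrates
// the resume-across-calls contract described in the comments above.
class AlwaysFail extends Behavior {
    behave(): BehaviorCode { return BehaviorCode.Failure; }
}
class AlwaysSucceed extends Behavior {
    behave(): BehaviorCode { return BehaviorCode.Success; }
}
const selector = new PartialSelector(new AlwaysFail(), new AlwaysSucceed());
selector.behave(); // Running: the first child failed, fail over to the next
selector.behave(); // Success: the second child succeeded, selection resets to 0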
googleuserinfo.go
package googleuserinfo import ( "context" "net/http" "github.com/psewda/typing/internal/utils" "github.com/psewda/typing/pkg/errs" "github.com/psewda/typing/pkg/signin/userinfo" oauth2v2 "google.golang.org/api/oauth2/v2" "google.golang.org/api/option" ) // GoogleUserinfo is the userinfo implementation // using google's oauth api. type GoogleUserinfo struct { service *oauth2v2.Service } // Get returns the basic detail of user. It // fetches user detail using google's oauth api. func (gu *GoogleUserinfo) Get() (*userinfo.User, error) { ui, err := gu.service.Userinfo.Get().Do() if err != nil
return &userinfo.User{ ID: ui.Id, Name: ui.Name, Email: ui.Email, Picture: ui.Picture, }, nil } // New creates a new instance of google userinfo. func New(c *http.Client) (*GoogleUserinfo, error) { service, err := oauth2v2.NewService(context.Background(), option.WithHTTPClient(c)) if err != nil { msg := "error while creating new instance of oauth2 service" return nil, utils.Error(msg, err) } return &GoogleUserinfo{ service: service, }, nil }
{ if utils.GetStatusCode(err) == http.StatusUnauthorized { return nil, errs.NewUnauthorizedError() } return nil, utils.Error("error while getting user info", err) }
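// Editor's note: a hypothetical wiring example, not part of this package. It
// assumes `client` is an *http.Client that already carries OAuth2 credentials
// with the userinfo scope, and that the caller imports "fmt".
func exampleGetUser(client *http.Client) error {
	gu, err := New(client)
	if err != nil {
		return err
	}
	user, err := gu.Get()
	if err != nil {
		return err
	}
	fmt.Printf("signed in as %s <%s>\n", user.Name, user.Email)
	return nil
}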
split.py
import sklearn.model_selection
import numpy as np
from sklearn.model_selection import ShuffleSplit, StratifiedShuffleSplit, cross_val_score, StratifiedKFold


def normal(X, labels, test_size):
    """Split a dataset into training and test parts.
    Args:
        X (numpy.ndarray): 2D features matrix
        labels: labels vector
        test_size: fraction (or absolute number) of samples for the test part
    Returns:
        The train/test split: X_train, X_test, Y_train, Y_test
    """
    Y = labels
    X_train, X_test, Y_train, Y_test = \
        sklearn.model_selection.train_test_split(X, Y, test_size=test_size, random_state=0)
    return X_train, X_test, Y_train, Y_test


def time_series_split(features, labels, n_splits):
    """Split a dataset into n_splits time-ordered folds.
    Note: only a single train/test pair is returned, not all n_splits folds.
    """
    xx = sklearn.model_selection.TimeSeriesSplit(n_splits)
    for train_index, test_index in xx.split(features):
        X_train, X_test = features[train_index], features[test_index]
        y_train, y_test = labels[train_index], labels[test_index]
    return X_train, X_test, y_train, y_test


def stratified_KFold(features, labels, n_splits):
    """Stratified K-Folds cross-validator.
    Stratification is the process of rearranging the data so as to ensure that
    each fold is a good representative of the whole while also keeping the
    balance of classes.
    Note: only a single train/test pair is returned, not all n_splits folds.
    """
    skf = StratifiedKFold(n_splits)
    skf.get_n_splits(features, labels)
    for train_index, test_index in skf.split(features, labels):
        X_train, X_test = features[train_index], features[test_index]
        Y_train, Y_test = labels[train_index], labels[test_index]
    return X_train, X_test, Y_train, Y_test


# Stratified ShuffleSplit cross-validator
def stratified_shuffle_Split(features, labels, n_splits, test_size, random_state):
# Random permutation cross-validator
def shuffle_Split(features, labels, n_splits, test_size, random_state):
    """ShuffleSplit: Random permutation cross-validator.
    Note: only a single train/test pair is returned, not all n_splits folds.
    """
    # test_size is keyword-only in recent scikit-learn releases, so pass
    # every argument by keyword.
    cv = ShuffleSplit(n_splits=n_splits, test_size=test_size, random_state=random_state)
    for train_index, test_index in cv.split(features):
        X_train = features[train_index]
        X_test = features[test_index]
        Y_train = labels[train_index]
        Y_test = labels[test_index]
    return X_train, X_test, Y_train, Y_test
"""Stratified ShuffleSplit cross-validator """ cv = StratifiedShuffleSplit(n_splits, test_size, random_state=random_state) for train_index, test_index in cv.split(features,labels): X_train = features[train_index] X_test = features[test_index] Y_train = labels[train_index] Y_test = labels[test_index] return X_train, X_test, Y_train, Y_test
stack_minor_version.py
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class StackMinorVersion(Model): """Application stack minor version. :param display_version: Application stack minor version (display only). :type display_version: str :param runtime_version: Application stack minor version (runtime only). :type runtime_version: str :param is_default: <code>true</code> if this is the default minor version;
:type is_default: bool """ _attribute_map = { 'display_version': {'key': 'displayVersion', 'type': 'str'}, 'runtime_version': {'key': 'runtimeVersion', 'type': 'str'}, 'is_default': {'key': 'isDefault', 'type': 'bool'}, } def __init__(self, **kwargs): super(StackMinorVersion, self).__init__(**kwargs) self.display_version = kwargs.get('display_version', None) self.runtime_version = kwargs.get('runtime_version', None) self.is_default = kwargs.get('is_default', None)
otherwise, <code>false</code>.
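A brief usage sketch: the generated model takes its fields as keyword arguments (the version strings are illustrative).

minor = StackMinorVersion(display_version="3.9", runtime_version="3.9.7", is_default=True)
print(minor.display_version, minor.is_default)  # 3.9 True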
expression.go
package clause import ( "database/sql" "database/sql/driver" "go/ast" "reflect" ) // Expression expression interface type Expression interface { Build(builder Builder) } // NegationExpressionBuilder negation expression builder type NegationExpressionBuilder interface { NegationBuild(builder Builder) } // Expr raw expression type Expr struct { SQL string Vars []interface{} WithoutParentheses bool } // Build build raw expression func (expr Expr) Build(builder Builder) { var ( afterParenthesis bool idx int ) for _, v := range []byte(expr.SQL) { if v == '?' && len(expr.Vars) > idx { if afterParenthesis || expr.WithoutParentheses { if _, ok := expr.Vars[idx].(driver.Valuer); ok { builder.AddVar(builder, expr.Vars[idx]) } else { switch rv := reflect.ValueOf(expr.Vars[idx]); rv.Kind() { case reflect.Slice, reflect.Array: if rv.Len() == 0 { builder.AddVar(builder, nil) } else { for i := 0; i < rv.Len(); i++ { if i > 0 { builder.WriteByte(',') } builder.AddVar(builder, rv.Index(i).Interface()) } } default: builder.AddVar(builder, expr.Vars[idx]) } } } else { builder.AddVar(builder, expr.Vars[idx]) } idx++ } else { if v == '(' { afterParenthesis = true } else { afterParenthesis = false } builder.WriteByte(v) } } } // NamedExpr raw expression for named expr type NamedExpr struct { SQL string Vars []interface{} } // Build build raw expression func (expr NamedExpr) Build(builder Builder) { var ( idx int inName bool afterParenthesis bool namedMap = make(map[string]interface{}, len(expr.Vars)) ) for _, v := range expr.Vars { switch value := v.(type) { case sql.NamedArg: namedMap[value.Name] = value.Value case map[string]interface{}: for k, v := range value { namedMap[k] = v } default: var appendFieldsToMap func(reflect.Value) appendFieldsToMap = func(reflectValue reflect.Value) { reflectValue = reflect.Indirect(reflectValue) switch reflectValue.Kind() { case reflect.Struct: modelType := reflectValue.Type() for i := 0; i < modelType.NumField(); i++ { if fieldStruct := modelType.Field(i); ast.IsExported(fieldStruct.Name) { namedMap[fieldStruct.Name] = reflectValue.Field(i).Interface() if fieldStruct.Anonymous { appendFieldsToMap(reflectValue.Field(i)) } } } } } appendFieldsToMap(reflect.ValueOf(value)) } } name := make([]byte, 0, 10) for _, v := range []byte(expr.SQL) { if v == '@' && !inName { inName = true name = []byte{} } else if v == ' ' || v == ',' || v == ')' || v == '"' || v == '\'' || v == '`' || v == '\n' { if inName { if nv, ok := namedMap[string(name)]; ok { builder.AddVar(builder, nv) } else { builder.WriteByte('@') builder.WriteString(string(name)) } inName = false } afterParenthesis = false builder.WriteByte(v) } else if v == '?' 
&& len(expr.Vars) > idx { if afterParenthesis { if _, ok := expr.Vars[idx].(driver.Valuer); ok { builder.AddVar(builder, expr.Vars[idx]) } else { switch rv := reflect.ValueOf(expr.Vars[idx]); rv.Kind() { case reflect.Slice, reflect.Array: if rv.Len() == 0 { builder.AddVar(builder, nil) } else { for i := 0; i < rv.Len(); i++ { if i > 0 { builder.WriteByte(',') } builder.AddVar(builder, rv.Index(i).Interface()) } } default: builder.AddVar(builder, expr.Vars[idx]) } } } else { builder.AddVar(builder, expr.Vars[idx]) } idx++ } else if inName { name = append(name, v) } else { if v == '(' { afterParenthesis = true } else { afterParenthesis = false } builder.WriteByte(v) } } if inName { builder.AddVar(builder, namedMap[string(name)]) } } // IN Whether a value is within a set of values type IN struct { Column interface{} Values []interface{} } func (in IN) Build(builder Builder) { builder.WriteQuoted(in.Column) switch len(in.Values) { case 0: builder.WriteString(" IN (NULL)") case 1: if _, ok := in.Values[0].([]interface{}); !ok { builder.WriteString(" = ") builder.AddVar(builder, in.Values[0]) break } fallthrough default: builder.WriteString(" IN (") builder.AddVar(builder, in.Values...) builder.WriteByte(')') } } func (in IN) NegationBuild(builder Builder) { switch len(in.Values) { case 0: case 1: if _, ok := in.Values[0].([]interface{}); !ok { builder.WriteQuoted(in.Column) builder.WriteString(" <> ") builder.AddVar(builder, in.Values[0]) break } fallthrough default: builder.WriteQuoted(in.Column) builder.WriteString(" NOT IN (") builder.AddVar(builder, in.Values...) builder.WriteByte(')') } } // Eq equal to for where type Eq struct { Column interface{} Value interface{} } func (eq Eq) Build(builder Builder) { builder.WriteQuoted(eq.Column) if eqNil(eq.Value) { builder.WriteString(" IS NULL") } else { builder.WriteString(" = ") builder.AddVar(builder, eq.Value) } } func (eq Eq) NegationBuild(builder Builder) { Neq(eq).Build(builder) } // Neq not equal to for where type Neq Eq func (neq Neq) Build(builder Builder) { builder.WriteQuoted(neq.Column) if eqNil(neq.Value) { builder.WriteString(" IS NOT NULL") } else { builder.WriteString(" <> ") builder.AddVar(builder, neq.Value) } } func (neq Neq) NegationBuild(builder Builder) { Eq(neq).Build(builder) } // Gt greater than for where type Gt Eq func (gt Gt) Build(builder Builder) { builder.WriteQuoted(gt.Column) builder.WriteString(" > ") builder.AddVar(builder, gt.Value) } func (gt Gt) NegationBuild(builder Builder) { Lte(gt).Build(builder) } // Gte greater than or equal to for where type Gte Eq func (gte Gte) Build(builder Builder) { builder.WriteQuoted(gte.Column) builder.WriteString(" >= ") builder.AddVar(builder, gte.Value) } func (gte Gte) NegationBuild(builder Builder) { Lt(gte).Build(builder) } // Lt less than for where type Lt Eq func (lt Lt) Build(builder Builder) { builder.WriteQuoted(lt.Column) builder.WriteString(" < ") builder.AddVar(builder, lt.Value) } func (lt Lt) NegationBuild(builder Builder) { Gte(lt).Build(builder) } // Lte less than or equal to for where type Lte Eq func (lte Lte) Build(builder Builder) { builder.WriteQuoted(lte.Column) builder.WriteString(" <= ") builder.AddVar(builder, lte.Value) } func (lte Lte) NegationBuild(builder Builder) { Gt(lte).Build(builder) } // Like whether string matches a pattern (SQL LIKE, not a regular expression) type Like Eq func (like Like) Build(builder Builder) { builder.WriteQuoted(like.Column) builder.WriteString(" LIKE ") builder.AddVar(builder, like.Value) } func (like Like) NegationBuild(builder Builder) {
builder.WriteQuoted(like.Column) builder.WriteString(" NOT LIKE ") builder.AddVar(builder, like.Value) } func eqNil(value interface{}) bool
func eqNilReflect(value interface{}) bool { reflectValue := reflect.ValueOf(value) return reflectValue.Kind() == reflect.Ptr && reflectValue.IsNil() }
{ if valuer, ok := value.(driver.Valuer); ok { value, _ = valuer.Value() } return value == nil || eqNilReflect(value) }
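A condition-building sketch under stated assumptions: a configured gorm v2 *gorm.DB named db and a User model, neither of which appears in this file. gorm accepts these clause expressions directly as Where conditions, and clause.Not drives the NegationBuild path.

var users []User

// WHERE name = 'jinzhu'
db.Where(clause.Eq{Column: "name", Value: "jinzhu"}).Find(&users)

// WHERE role NOT IN ('admin','maintainer') -- via IN's NegationBuild
db.Where(clause.Not(clause.IN{Column: "role", Values: []interface{}{"admin", "maintainer"}})).Find(&users)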
log.py
# Copyright (c) 2021 Graphcore Ltd. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Logging utilities. """ import csv import datetime import json import logging import os import random import subprocess import numpy as np import tensorflow as tf from tensorflow import pywrap_tensorflow # Set Python logger # Match TensorFlow's default logging format. logFormatter = logging.Formatter( '%(asctime)s.%(msecs)06d: %(levelname)-1.1s %(message)s', datefmt='%Y-%m-%d %H:%M:%S') logger = logging.getLogger() logger.setLevel(logging.INFO) consoleHandler = logging.StreamHandler() consoleHandler.setFormatter(logFormatter) logger.addHandler(consoleHandler) def get_logger(): return logger def set_log_file_path(log_file_path): global logger fileHandler = logging.FileHandler(log_file_path) fileHandler.setFormatter(logFormatter) logger.addHandler(fileHandler) def add_arguments(parser): group = parser.add_argument_group('Logging') group.add_argument('--log-dir', type=str, default="./logs/", help="Log and weights save directory") group.add_argument('--name-suffix', type=str, help="Suffix added to name string") group.add_argument('--steps-per-logs', type=int, default=1, help="Logs per epoch (if number of epochs specified)") group.add_argument('--steps-per-tensorboard', type=int, default=0, help='Number of steps between saving statistics to TensorBoard. 
0 to disable.') return parser def set_defaults(opts): name = opts['name'] if opts["name_suffix"]: name = name + "_" + opts["name_suffix"] if opts.get("poplar_version"): v = opts['poplar_version'] # name += "_v" + v[v.find("version ") + 8: v.rfind(' ')] name += "_v" + v[v.find("version ") + 8: v.find(' (')] # We want this to be random even if random seeds have been set so that we don't overwrite # when re-running with the same seed random_state = random.getstate() random.seed() random.setstate(random_state) # System time with milliseconds time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3] name += "_{}".format(time) if not os.path.isdir(opts["save_path"]): os.makedirs(opts["save_path"], exist_ok=True) opts["logs_path"] = os.path.join(opts["save_path"], name) opts["checkpoint_path"] = os.path.join(opts["save_path"], name, 'ckpt') if not os.path.isdir(opts["logs_path"]): os.makedirs(opts["logs_path"], exist_ok=True) set_log_file_path(os.path.join(opts['logs_path'], 'log.txt')) with open(os.path.join(opts["logs_path"], 'arguments.json'), 'w') as fp: json.dump(opts, fp, sort_keys=True, indent=4, separators=(',', ': ')) return opts def write_to_csv(d, write_header, training, logs_path): if logs_path: filename = 'training.csv' if training else 'validation.csv' with open(os.path.join(logs_path, filename), 'a+') as f: w = csv.DictWriter(f, d.keys()) if write_header: w.writeheader() w.writerow(d) def print_trainable_variables(logs_path): logger.info('Trainable Variables:') total_parameters = 0 for variable in tf.trainable_variables(): logger.info(variable) variable_parameters = 1 for DIM in variable.get_shape(): variable_parameters *= DIM.value total_parameters += variable_parameters logger.info('Total Parameters:' + str(total_parameters) + '\n') def make_histogram(values, bins=512): # From https://gist.github.com/gyglim/1f8dfb1b5c82627ae3efcfbbadb9f514 # License: BSD License 2.0 # Author Michael Gygli # Logs the histogram of a list/vector of values. 
# Convert to a numpy array values = np.array(values) # Create histogram using numpy counts, bin_edges = np.histogram(values, bins=bins) # Fill fields of histogram proto hist = tf.HistogramProto() hist.min = float(np.min(values)) hist.max = float(np.max(values)) hist.num = int(np.prod(values.shape)) hist.sum = float(np.sum(values)) hist.sum_squares = float(np.sum(values**2)) # bucket_limit requires exactly one edge per bin, where the first bucket implicitly spans -DBL_MAX to bin_edges[1] # See https://github.com/tensorflow/tensorflow/blob/r2.6/tensorflow/core/framework/summary.proto#L30 # Thus, we drop the start of the first bin bin_edges = bin_edges[1:] # Add bin edges and counts for edge in bin_edges: hist.bucket_limit.append(edge) for c in counts: hist.bucket.append(c) # Create and write Summary return hist # return tf.Summary.Value(tag=tag, histo=hist) def save_model_statistics(checkpoint_path, summary_writer, step=0): initializers = load_initializers_from_checkpoint(checkpoint_path) summary = tf.Summary() for name, np_weight in initializers.items(): name = name.replace(":", "_") tensor = np_weight.astype(np.float32) if not np.any(np.isnan(tensor)): summary.value.add(tag=name, histo=make_histogram(tensor)) summary.value.add(tag=f"L2/{name}", simple_value=np.linalg.norm(tensor)) summary_writer.add_summary(summary, step) summary_writer.flush() def load_initializers_from_checkpoint(checkpoint_path): initializers = {} reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path) var_to_map = reader.get_variable_to_shape_map() for key, dim in var_to_map.items(): if key == 'global_step': continue # if reader.get_tensor(key).dtype.name == 'float16': # int_data = np.asarray(reader.get_tensor(key), np.int32) # np_weight = int_data.view(dtype=np.float16).reshape(dim) # else: np_weight = reader.get_tensor(key) initializers[key] = np_weight return initializers def get_git_revision():
return subprocess.check_output(["git", "describe", "--always", "--dirty"]).strip().decode()
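A short usage sketch tying the helpers together; the log path is hypothetical and get_git_revision assumes the process runs inside a git checkout.

logger = get_logger()
set_log_file_path("./logs/run.txt")  # hypothetical path
logger.info("git revision: %s", get_git_revision())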
spotify.py
import sys import requests from . import settings spotify_base = "https://api.spotify.com/v1" def get_spotipy_token(): import spotipy.util as util params = get_spotify_auth_params() return util.prompt_for_user_token(**params) def get_headers(token): return {"Authorization": "Bearer %s" % (token)} def get_spotify_auth_params(): return { "client_id": settings.get("SPOTIPY_CLIENT_ID"), "client_secret": settings.get("SPOTIPY_CLIENT_SECRET"), "redirect_uri": settings.get("SPOTIPY_REDIRECT_URI"), "username": settings.get("SPOTIPY_USERNAME"), "scope": " ".join(list(settings.get("SPOTIPY_SCOPES"))), } def
(): """Ensure user set needed API config :returns: None """ if not settings.ensure_all(): print("You must set your Spotify's app config!") print("Run spotify-cli config") sys.exit() def get_spotipy(): ensure_settings() import spotipy token = get_spotipy_token() return spotipy.Spotify(auth=token) def previous_track(token): url = "%s/me/player/previous" % (spotify_base) r = requests.post(url, headers=get_headers(token)) return r.status_code == 204 def next_track(token): url = "%s/me/player/next" % (spotify_base) r = requests.post(url, headers=get_headers(token)) return r.status_code == 204 def get_current_playback(token): url = "%s/me/player/currently-playing" % (spotify_base) r = requests.get(url, headers=get_headers(token)) return r.json() def pause_playback(token): url = "%s/me/player/pause" % (spotify_base) r = requests.put(url, headers=get_headers(token)) return r.status_code == 204 def resume_playback(token): url = "%s/me/player/play" % (spotify_base) r = requests.put(url, headers=get_headers(token)) return r.status_code == 204
ensure_settings
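A usage sketch, assuming the Spotify app config has already been set via `spotify-cli config` and that a playback device is active; the payload shape follows Spotify's currently-playing response.

token = get_spotipy_token()
playback = get_current_playback(token)
print(playback["item"]["name"])  # name of the currently playing track
if not pause_playback(token):
    print("could not pause playback")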
RosViewer.py
#!/usr/bin/env python3 # RosViewer.py = node that listens to a ROS image message topic, # and displays the image using OpenCV. import rospy import cv2 from sensor_msgs.msg import Image from cv_bridge import CvBridge, CvBridgeError class image_viewer: # "/camera/color/image_raw" or "/camera/color/video" def __init__(self): self.bridge = CvBridge() self.image_sub = rospy.Subscriber("/camera/color/image_raw", Image, self.ros_cb, queue_size=1, buff_size=2 ** 24) def ros_cb(self,msg):
cv2.imshow("Ros video", cv_image) key = cv2.waitKey(10) # in milliseconds if key == 113: # 113 is the letter 'q' cv2.destroyAllWindows() rospy.signal_shutdown("Quitting") print("Starting Ros video image_viewer v1.2 ; press q to quit in video-window.") rospy.init_node('image_viewer', anonymous=True) iv = image_viewer() rospy.spin() print("Finished")
cv_image = self.bridge.imgmsg_to_cv2(msg, "bgr8") # encoding reported in msg.data is "rgb8", but the RS camera actually delivers "bgr8"
pagination.rs
use diesel::pg::Pg; use diesel::prelude::*; use diesel::query_builder::*; use diesel::query_dsl::methods::LoadQuery; use diesel::sql_types::BigInt; const DEFAULT_PER_PAGE: i64 = 50; pub trait Paginate: Sized { fn paginate(self, page: i64) -> Paginated<Self>; } impl<T> Paginate for T { fn paginate(self, page: i64) -> Paginated<Self>
} #[derive(Debug, Clone, Copy, QueryId)] pub struct Paginated<T> { query: T, page: i64, per_page: i64, } impl<T> Paginated<T> { pub fn per_page(self, per_page: i64) -> Self { Paginated { per_page, ..self } } pub fn load_and_count_pages<U>(mut self, conn: &PgConnection) -> Result<(Vec<U>, i64), diesel::result::Error> where Self: LoadQuery<PgConnection, (U, i64)>, { // Guard against a non-positive page size before the query runs; // adjusting only a local copy would leave the SQL LIMIT unchanged. if self.per_page <= 0 { self.per_page = DEFAULT_PER_PAGE; } let results = self.load::<(U, i64)>(conn)?; // COUNT(*) OVER () repeats the total row count on every row. let total = results.get(0).map(|x| x.1).unwrap_or(0); let records = results.into_iter().map(|x| x.0).collect(); Ok((records, total)) } } impl<T: Query> Query for Paginated<T> { type SqlType = (T::SqlType, BigInt); } impl<T> RunQueryDsl<PgConnection> for Paginated<T> {} impl<T> QueryFragment<Pg> for Paginated<T> where T: QueryFragment<Pg>, { fn walk_ast(&self, mut out: AstPass<Pg>) -> QueryResult<()> { out.push_sql("SELECT *, COUNT(*) OVER () FROM ("); self.query.walk_ast(out.reborrow())?; out.push_sql(") t LIMIT "); out.push_bind_param::<BigInt, _>(&self.per_page)?; out.push_sql(" OFFSET "); // zero-based page index let offset = (self.page) * self.per_page; out.push_bind_param::<BigInt, _>(&offset)?; Ok(()) } }
{ Paginated { query: self, per_page: DEFAULT_PER_PAGE, page, } }
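A usage sketch, assuming a diesel schema with a users table, a User model, and an open PgConnection; note that load_and_count_pages returns the total row count (not a page count) and that pages are zero-based here.

fn first_page(conn: &PgConnection) -> QueryResult<()> {
    let (rows, total): (Vec<User>, i64) = users::table
        .paginate(0)      // first page (offset 0)
        .per_page(25)
        .load_and_count_pages(conn)?;
    println!("showing {} of {} users", rows.len(), total);
    Ok(())
}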
Card.tsx
import * as React from 'react' import clsx from 'clsx' export interface Props { className?: string | Record<string, unknown> } const Card: React.FC<Props> = ({ className, children }) => {
return ( <div className={clsx( 'rounded shadow-md', 'p-4 sm:p-6 md:p-8', 'bg-white dark:bg-gray-800', 'border border-gray-200 dark:border-gray-700', className )} > {children} </div> ) } export default Card
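Usage sketch: extra classes passed via className are merged by clsx with the defaults.

const Example: React.FC = () => (
  <Card className="max-w-md mx-auto">
    <h2 className="text-lg font-semibold">Hello</h2>
    <p>Card content goes here.</p>
  </Card>
)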
mod.rs
pub use self::asc_ptr::AscPtr; use std::mem::size_of; use wasmi; pub mod asc_ptr; pub mod class; ///! Facilities for creating and reading objects on the memory of an ///! AssemblyScript (Asc) WASM module. Objects are passed through ///! the `asc_new` and `asc_get` methods of an `AscHeap` implementation. ///! These methods take types that implement `To`/`FromAscObj` and are ///! therefore convertible to/from an `AscType`. ///! Implementations of `AscType` live in the `class` module. ///! Implementations of `To`/`FromAscObj` live in the `to_from` module. /// WASM is little-endian, and for simplicity we currently assume that the host /// is also little-endian. #[cfg(target_endian = "big")] compile_error!("big-endian targets are currently unsupported"); /// A type that can read and write to the Asc heap. Call `asc_new` and `asc_get` /// for reading and writing Rust structs from and to Asc. /// /// The implementor must provide the direct Asc interface with `raw_new` and `get`. pub trait AscHeap: Sized { /// Allocate new space and write `bytes`, return the allocated address. fn raw_new(&mut self, bytes: &[u8]) -> Result<u32, wasmi::Error>; /// Just like `wasmi::MemoryInstance::get`. fn get(&self, offset: u32, size: u32) -> Result<Vec<u8>, wasmi::Error>; /// Instantiate `rust_obj` as an Asc object of class `C`. /// Returns a pointer to the Asc heap. /// /// This operation is expensive as it requires a call to `raw_new` for every /// nested object. fn asc_new<C, T: ?Sized>(&mut self, rust_obj: &T) -> AscPtr<C> where C: AscType, T: ToAscObj<C>, { AscPtr::alloc_obj(&rust_obj.to_asc_obj(self), self) } /// Read the rust representation of an Asc object of class `C`. /// /// This operation is expensive as it requires a call to `get` for every /// nested object. fn asc_get<T, C>(&self, asc_ptr: AscPtr<C>) -> T where C: AscType, T: FromAscObj<C>, { T::from_asc_obj(asc_ptr.read_ptr(self), self) } } /// Type that can be converted to an Asc object of class `C`. pub trait ToAscObj<C: AscType> { fn to_asc_obj<H: AscHeap>(&self, heap: &mut H) -> C; } /// Type that can be converted from an Asc object of class `C`. pub trait FromAscObj<C: AscType> { fn from_asc_obj<H: AscHeap>(obj: C, heap: &H) -> Self; } // `AscType` is not really public, implementors should live inside the `class` module. /// A type that has a direct correspondence to an Asc type. /// /// This can be derived for structs that are `#[repr(C)]`, contain no padding /// and whose fields are all `AscValue`. Enums can derive if they are `#[repr(u32)]`. /// /// Special classes like `ArrayBuffer` use custom impls. /// /// See https://github.com/graphprotocol/graph-node/issues/607 for more considerations. pub trait AscType: Sized { /// Transform the Rust representation of this instance into a sequence of /// bytes that is precisely the memory layout of a corresponding Asc instance. fn to_asc_bytes(&self) -> Vec<u8>; /// The Rust representation of an Asc object as laid out in Asc memory. fn from_asc_bytes(asc_obj: &[u8]) -> Self; /// Size of the corresponding Asc instance in bytes. fn asc_size<H: AscHeap>(_ptr: AscPtr<Self>, _heap: &H) -> u32 { size_of::<Self>() as u32 } } // `AscValue` also isn't really public. /// An Asc primitive or an `AscPtr` into the Asc heap. A type marked as /// `AscValue` must have the same byte representation in Rust and Asc, including /// same size, and size must be equal to alignment.
pub trait AscValue: AscType + Copy + Default {} impl AscType for bool { fn to_asc_bytes(&self) -> Vec<u8> { vec![*self as u8] } fn from_asc_bytes(asc_obj: &[u8]) -> Self { assert_eq!(asc_obj.len(), size_of::<Self>()); asc_obj[0] != 0 } } impl AscType for i8 { fn to_asc_bytes(&self) -> Vec<u8> { vec![*self as u8] } fn from_asc_bytes(asc_obj: &[u8]) -> Self { assert_eq!(asc_obj.len(), size_of::<Self>()); asc_obj[0] as i8 } } impl AscType for i16 { fn to_asc_bytes(&self) -> Vec<u8> { self.to_le_bytes().to_vec() } fn from_asc_bytes(asc_obj: &[u8]) -> Self { assert_eq!(asc_obj.len(), size_of::<Self>()); Self::from_le_bytes([asc_obj[0], asc_obj[1]]) } } impl AscType for i32 { fn to_asc_bytes(&self) -> Vec<u8> { self.to_le_bytes().to_vec() } fn from_asc_bytes(asc_obj: &[u8]) -> Self { assert_eq!(asc_obj.len(), size_of::<Self>()); Self::from_le_bytes([asc_obj[0], asc_obj[1], asc_obj[2], asc_obj[3]]) } } impl AscType for i64 { fn to_asc_bytes(&self) -> Vec<u8> { self.to_le_bytes().to_vec() } fn from_asc_bytes(asc_obj: &[u8]) -> Self { assert_eq!(asc_obj.len(), size_of::<Self>()); Self::from_le_bytes([ asc_obj[0], asc_obj[1], asc_obj[2], asc_obj[3], asc_obj[4], asc_obj[5], asc_obj[6], asc_obj[7], ]) } } impl AscType for u8 { fn to_asc_bytes(&self) -> Vec<u8> { vec![*self] } fn from_asc_bytes(asc_obj: &[u8]) -> Self { assert_eq!(asc_obj.len(), size_of::<Self>()); asc_obj[0] } } impl AscType for u16 { fn
(&self) -> Vec<u8> { self.to_le_bytes().to_vec() } fn from_asc_bytes(asc_obj: &[u8]) -> Self { assert_eq!(asc_obj.len(), size_of::<Self>()); Self::from_le_bytes([asc_obj[0], asc_obj[1]]) } } impl AscType for u32 { fn to_asc_bytes(&self) -> Vec<u8> { self.to_le_bytes().to_vec() } fn from_asc_bytes(asc_obj: &[u8]) -> Self { assert_eq!(asc_obj.len(), size_of::<Self>()); Self::from_le_bytes([asc_obj[0], asc_obj[1], asc_obj[2], asc_obj[3]]) } } impl AscType for u64 { fn to_asc_bytes(&self) -> Vec<u8> { self.to_le_bytes().to_vec() } fn from_asc_bytes(asc_obj: &[u8]) -> Self { assert_eq!(asc_obj.len(), size_of::<Self>()); Self::from_le_bytes([ asc_obj[0], asc_obj[1], asc_obj[2], asc_obj[3], asc_obj[4], asc_obj[5], asc_obj[6], asc_obj[7], ]) } } impl AscType for f32 { fn to_asc_bytes(&self) -> Vec<u8> { self.to_bits().to_asc_bytes() } fn from_asc_bytes(asc_obj: &[u8]) -> Self { Self::from_bits(u32::from_asc_bytes(asc_obj)) } } impl AscType for f64 { fn to_asc_bytes(&self) -> Vec<u8> { self.to_bits().to_asc_bytes() } fn from_asc_bytes(asc_obj: &[u8]) -> Self { Self::from_bits(u64::from_asc_bytes(asc_obj)) } } impl AscValue for bool {} impl AscValue for i8 {} impl AscValue for i16 {} impl AscValue for i32 {} impl AscValue for i64 {} impl AscValue for u8 {} impl AscValue for u16 {} impl AscValue for u32 {} impl AscValue for u64 {} impl AscValue for f32 {} impl AscValue for f64 {}
to_asc_bytes
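A quick sanity sketch of the primitive impls above: they are plain little-endian round-trips.

let bytes = 0xDEADBEEFu32.to_asc_bytes();
assert_eq!(bytes, vec![0xEF, 0xBE, 0xAD, 0xDE]); // little-endian layout
assert_eq!(u32::from_asc_bytes(&bytes), 0xDEADBEEF);
assert_eq!(f64::from_asc_bytes(&1.5f64.to_asc_bytes()), 1.5);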
datetime.rs
use chrono::{DateTime, Duration, TimeZone, Utc}; use lazy_static::lazy_static; use std::time::SystemTime; // TODO: This is not entirely accurate for any date before Feb 28th, 1900 I believe. // We should be more precise. But this seems to be the implementation of SheetJS. // https://github.com/tafia/calamine/issues/116 lazy_static! { static ref EXCEL_EPOCH: DateTime<Utc> = Utc.ymd(1899, 12, 30).and_hms(0, 0, 0); } pub fn to_excel_datetime(instant: SystemTime) -> f64 { let instant_datetime: DateTime<Utc> = instant.into(); let duration_since_excel_epoch = instant_datetime.signed_duration_since(*EXCEL_EPOCH); let days = duration_since_excel_epoch.num_days(); let milliseconds = duration_since_excel_epoch.num_milliseconds(); let milliseconds_after_days = milliseconds - Duration::days(days).num_milliseconds(); let fraction = Duration::milliseconds(milliseconds_after_days).num_milliseconds() as f64 / Duration::days(1).num_milliseconds() as f64; days as f64 + fraction } // Can be included in the main binary once there is a function that consumes this #[cfg(test)] pub fn from_excel_datetime(since_excel_epoch: f64) -> SystemTime { let days_since_excel_epoch = Duration::days(since_excel_epoch.floor() as i64); let fraction = since_excel_epoch - since_excel_epoch.floor(); let fraction_milliseconds = (Duration::days(1).num_milliseconds() as f64 * fraction).floor() as i64;
SystemTime::from(datetime) } // TODO: Add roundtrip tests. Should be fairly straightforward. #[cfg(test)] mod tests { use crate::functions::eval_helper::datetime::{from_excel_datetime, to_excel_datetime}; use chrono::{TimeZone, Utc}; use std::time::SystemTime; #[test] fn test_to_and_from_excel_datetime_whole_days() { let instant = SystemTime::from(Utc.ymd(2000, 1, 1).and_hms_milli(0, 0, 0, 0)); let result = to_excel_datetime(instant); assert_eq!(result, 36526.0); assert_eq!(from_excel_datetime(result), instant); } #[test] fn test_to_and_from_excel_datetime_fractional() { let instant = SystemTime::from(Utc.ymd(2000, 1, 1).and_hms(1, 1, 1)); let result = to_excel_datetime(instant); assert_eq!(result, 36526.04237268519); assert_eq!(from_excel_datetime(result), instant); } }
let datetime = *EXCEL_EPOCH + days_since_excel_epoch + Duration::milliseconds(fraction_milliseconds);
identifier.rs
//! Module for types and utilities related to dealing with identifiers. use crate::prelude::*; use ast::crumbs::Located; use std::cmp::Ordering; // ================== // === Identifier === // ================== // === Errors === #[allow(missing_docs)] #[derive(Clone, Debug, Fail)] #[fail(display = "Identifier contains operator `{}`, so it cannot be made into var.", _0)] pub struct OperatorCantBeMadeIntoVar(String); #[allow(missing_docs)] #[derive(Clone, Debug, Fail)] #[fail(display = "The `{}` is not a valid identifier.", _0)] pub struct NotAnIdentifier(String); #[allow(missing_docs)] #[derive(Clone, Copy, Debug, Fail)] #[fail(display = "Empty string is not a valid identifier.")] pub struct IdentifierCannotBeEmpty; // === Definition === /// Wrapper over an Ast that holds an atomic identifier of any kind. /// /// Comparisons compare the underlying name strings. /// /// Invariants: can get identifier name, the name is non-empty. #[derive(Clone, Debug, Shrinkwrap)] pub struct Identifier(Ast); impl Identifier { /// Wrap the `Ast` into `Identifier` if it actually is an identifier. pub fn new(ast: Ast) -> Option<Self> { let name = ast::identifier::name(&ast)?; (!name.is_empty()).as_some(Identifier(ast)) } /// Convert given text into an identifier Ast and wrap. /// /// Can fail if a given string is not a valid identifier, however the exact scope of validation /// is currently unspecified. pub fn from_text(text: impl Into<String>) -> FallibleResult<Self> { // TODO? [mwu] // We should be able to call parser or sth to verify that other requirements for the // referent form identifiers are fulfilled. // This is expected to become properly possible when the Rust rewrite of parser is done. // See: https://github.com/enso-org/enso/issues/435 // On the other hand it is not clear how strict we want to be here, so as not to break // processing of syntactically invalid code. let text = text.into(); let empty_string_error = failure::Error::from(IdentifierCannotBeEmpty); let first_char = text.chars().next().ok_or(empty_string_error)?; match first_char { c if c.is_lowercase() => Ok(Ast::var(text)), c if c.is_uppercase() => Ok(Ast::cons(text)), c if ast::opr::SYMBOLS.contains(&c) => Ok(Ast::opr(text)), _ => Err(NotAnIdentifier(text).into()), } .map(Identifier) } /// Get the identifier name. pub fn name(&self) -> &str { // Unwrap here is safe, as identifiers always allow obtaining a name. ast::identifier::name(&self.0).unwrap() } /// Convert identifier to the variable form (i.e. non-referent). Fails if this is an operator. pub fn as_var(&self) -> Result<ast::Var, OperatorCantBeMadeIntoVar> { let name = self.name(); // Unwrap below is safe, as identifier is always non-empty. let first_char = name.chars().next().unwrap(); if first_char.is_alphabetic() { let name = name.to_lowercase(); Ok(ast::Var { name }) } else { Err(OperatorCantBeMadeIntoVar(name.to_owned())) } } /// Get a normalized version of this identifier. pub fn normalized(&self) -> NormalizedName { NormalizedName::new(self.name()) } /// Get the identifier's node with a newly assigned, unique id. /// /// This is needed if the identifier from AST is to be reused in a different part of the tree. /// Cloning it without generating a new ID would introduce two nodes with the same id.
pub fn with_new_id(&self) -> Self { Self(self.0.with_new_id()) } } // === Implementations === impl PartialOrd for Identifier { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { self.name().partial_cmp(other.name()) } } impl Ord for Identifier { fn cmp(&self, other: &Self) -> Ordering { self.name().cmp(other.name()) } } impl TryFrom<String> for Identifier { type Error = failure::Error; fn try_from(value: String) -> Result<Self, Self::Error> { Identifier::from_text(value) } } impl From<Identifier> for String { fn from(value: Identifier) -> Self { value.name().into() } } impl TryFrom<&str> for Identifier { type Error = failure::Error; fn try_from(value: &str) -> Result<Self, Self::Error> { Identifier::from_text(value) } } impl From<ast::known::Var> for Identifier { fn from(value: ast::known::Var) -> Self { Identifier(value.into()) } } impl From<ast::known::Cons> for Identifier { fn from(value: ast::known::Cons) -> Self { Identifier(value.into()) } } impl From<ast::known::Opr> for Identifier { fn from(value: ast::known::Opr) -> Self { Identifier(value.into()) } } impl From<Identifier> for Ast { fn from(value: Identifier) -> Self { value.0 } } impl From<&Identifier> for Ast { fn from(value: &Identifier) -> Self { value.0.clone() } } impl Display for Identifier { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { Display::fmt(&self.name(), f) } } impl PartialEq for Identifier { fn eq(&self, other: &Self) -> bool { self.name().eq(other.name()) } } impl Eq for Identifier {} impl Hash for Identifier { fn hash<H: Hasher>(&self, state: &mut H) { self.name().hash(state) } } // ==================== // === ReferentName === // ==================== // === Errors === /// Happens if a given string does not fulfill requirements of the referent name; #[derive(Clone, Debug, Fail)] #[fail(display = "The `{}` is not a valid referent name.", _0)] pub struct NotReferentName(String); // === Definition === /// The name segment is a string that starts with an upper-cased character. /// /// It is used for naming modules, module path segments and projects. /// /// This value corresponds to contents of the `Cons` AST shape. #[derive(Clone, Debug, Display, Shrinkwrap, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct ReferentName(String); impl ReferentName { /// Check if the given text would be a valid referent name; pub fn validate(name: impl AsRef<str>) -> Result<(), NotReferentName> { let name = name.as_ref(); let first_char = name.chars().next(); match first_char { Some(c) if c.is_uppercase() => Ok(()), _ => Err(NotReferentName(name.into())), } } /// Try interpreting given string as a referent name. /// /// Referent name is an identifier starting with an upper-cased letter, like `Maybe`. /// /// Fails if the given string is not a valid referent name (e.g. an empty string or lower-cased /// string). pub fn new(name: impl Str) -> Result<ReferentName, NotReferentName> { Self::validate(name.as_ref()).map(|_| ReferentName(name.into())) } /// Convert given string into a referent name. /// /// First letter of each "word" (underscore-separated segment) will be capitalized. All other /// letters will be turned into lower case. /// /// Fails if the given string is empty. pub fn from_identifier_text(name: impl Str) -> Result<Self, NotReferentName> { let name = name.as_ref(); if name.is_empty() { return Err(NotReferentName(name.into())); } let name = name.split('_').map(str::to_lowercase).map(capitalize_first).join("_"); Ok(Self(name)) } /// Get a normalized version of this identifier. 
pub fn normalized(&self) -> NormalizedName { NormalizedName::new(self) } } // === Implementations === impl AsRef<str> for ReferentName { fn as_ref(&self) -> &str { self.0.as_ref() } } impl TryFrom<&str> for ReferentName { type Error = NotReferentName; fn try_from(value: &str) -> Result<Self, Self::Error> { Self::new(value) } } impl TryFrom<String> for ReferentName { type Error = NotReferentName; fn try_from(value: String) -> Result<Self, Self::Error> { Self::new(value) } } impl From<ReferentName> for String { fn from(name: ReferentName) -> Self { name.0 } } impl From<&ReferentName> for String { fn from(name: &ReferentName) -> Self { name.0.clone() } } impl PartialEq<String> for ReferentName { fn eq(&self, other: &String) -> bool
} impl PartialEq<&str> for ReferentName { fn eq(&self, other: &&str) -> bool { &self.0 == other } } // ====================== // === NormalizedName === // ====================== // === Definition === /// The identifier name normalized to a lower-case (as the comparisons are case-insensitive). /// Implements case-insensitive compare with AST. #[derive(Clone, Debug, Display, Hash, PartialEq, Eq)] #[derive(Shrinkwrap)] pub struct NormalizedName(String); impl NormalizedName { /// Wraps given string into the normalized name. pub fn new(name: impl AsRef<str>) -> NormalizedName { let name = name.as_ref().to_lowercase(); NormalizedName(name) } /// If the given AST is an identifier, returns its normalized name. pub fn try_from_ast(ast: &Ast) -> Option<NormalizedName> { ast::identifier::name(ast).map(NormalizedName::new) } /// Is the given string a prefix of this name. pub fn starts_with(&self, name: impl AsRef<str>) -> bool { let prefix = NormalizedName::new(name); self.0.starts_with(prefix.0.as_str()) } } // === Implementations === /// Tests if Ast is identifier that might reference the same name (case insensitive match). impl PartialEq<Ast> for NormalizedName { fn eq(&self, other: &Ast) -> bool { NormalizedName::try_from_ast(other).contains_if(|other_name| other_name == self) } } impl From<NormalizedName> for String { fn from(name: NormalizedName) -> Self { name.0 } } /// Case-insensitive identifier with its ast crumb location (relative to the node's ast). pub type LocatedName = Located<NormalizedName>; // ================= // === Utilities === // ================= /// Generate an identifier name that is not present in the given sequence. /// /// The name is generated by taking `base` string and appending subsequent integers. pub fn generate_name( base: impl AsRef<str>, unavailable: impl IntoIterator<Item = NormalizedName>, ) -> FallibleResult<Identifier> { let base = base.as_ref(); let is_relevant = |name: &NormalizedName| name.starts_with(base); let unavailable = unavailable.into_iter().filter(is_relevant).collect::<HashSet<_>>(); let name = (1..) .find_map(|i| { let candidate = NormalizedName::new(iformat!("{base}{i}")); let available = !unavailable.contains(&candidate); available.as_some(candidate) }) .unwrap(); // It never yields `None`, as we iterate infinite sequence until we find match. Identifier::from_text(name) } /// Capitalize the first letter of the passed string. fn capitalize_first(string: String) -> String { let mut chars = string.chars(); match chars.next() { None => String::new(), Some(first_char) => first_char.to_uppercase().to_string() + chars.as_str(), } } // ============= // === Tests === // ============= #[cfg(test)] mod tests { use super::*; #[test] fn referent_name_from_identifier_text() { let cases = [ ("identifier", "Identifier"), ("project_1", "Project_1"), ("muLti_Word_iDenTiFier", "Multi_Word_Identifier"), ]; for (input, expected) in cases { let referent_name = ReferentName::from_identifier_text(input).expect("ReferentName creation failed"); assert_eq!(referent_name, expected); } assert!(ReferentName::from_identifier_text("").is_err()); } }
{ &self.0 == other }
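A usage sketch for the utilities above; the inputs are illustrative.

let taken = vec![NormalizedName::new("sum1"), NormalizedName::new("sum2")];
let fresh = generate_name("sum", taken).unwrap();
assert_eq!(fresh.name(), "sum3"); // first free candidate after sum1, sum2

let module = ReferentName::from_identifier_text("my_project").unwrap();
assert_eq!(module, "My_Project"); // each underscore-separated word capitalized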
index.d.ts
/** An error thrown by the API. */ declare class
extends Error { message: string } interface GraphqlData { query: string operationName?: string variables?: object } declare const slothpixel: { SlothpixelError: typeof SlothpixelError /** Send a request to the Slothpixel API. @param endpoint The [API endpoint](https://docs.slothpixel.me) to call. @param options The options to pass to the API. @example ``` const slothpixel = require("slothpixel"); (async () => { const { uuid } = await slothpixel("players/Richienb"); console.log(uuid); //=> "56da43a4088d4a7682b6dd431535015e" })(); ``` */ <ReturnType = object | object[]>(endpoint: string, options?: Record<string, string | number | boolean>): Promise<ReturnType> /** Send a request to the Slothpixel Graphql API. @param data The graphql data to send. @example ``` const slothpixel = require("slothpixel"); (async () => { const query = `{ players { player(player_name: "Richienb") { uuid } } }`; const data = await slothpixel.graphql({ query }); console.log(data.players.player.uuid) //=> "56da43a4088d4a7682b6dd431535015e" })(); ``` */ graphql<ReturnType = object>(data: GraphqlData): Promise<ReturnType> } export = slothpixel
SlothpixelError
OapiAttendanceShiftSearchRequest.py
''' Created by auto_sdk on 2019.07.31 ''' from dingtalk.api.base import RestApi class OapiAttendanceShiftSearchRequest(RestApi): def __init__(self,url=None): RestApi.__init__(self,url) self.op_user_id = None
def getHttpMethod(self): return 'POST' def getapiname(self): return 'dingtalk.oapi.attendance.shift.search'
self.shift_name = None
ippool.go
package types import ( "fmt" cnet "github.com/projectcalico/libcalico-go/lib/net" "yunion.io/x/pkg/errors" ) const ( LabelManaged = "yunion.io/managed" LabelManagedValueAgent = "calico-node-agent" ) type NodeIPPool struct { // The node ip pool CIDR. CIDR string `json:"cidr"` } func (pool NodeIPPool) Validate() error { if pool.CIDR == ""
_, _, err := pool.GetIPAndNet() if err != nil { return errors.Wrap(err, "Get pool IPAndNet") } return nil } func (pool NodeIPPool) GetCIDR() (string, error) { if err := pool.Validate(); err != nil { return "", err } ip, ipNet, err := pool.GetIPAndNet() if err != nil { return "", err } maskLen, _ := ipNet.Mask.Size() return fmt.Sprintf("%s/%d", ip.To4().String(), maskLen), nil } func (pool NodeIPPool) GetIPAndNet() (*cnet.IP, *cnet.IPNet, error) { ip, ipnet, err := cnet.ParseCIDROrIP(pool.CIDR) if err != nil { return nil, nil, errors.Wrapf(err, "ParseCIDROrIP %s", pool.CIDR) } return ip, ipnet, nil } type NodeIPPools []NodeIPPool
{ return errors.Errorf("CIDR is empty") }
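Usage sketch: normalizing a pool spec into a host CIDR string (the address is illustrative).

pool := types.NodeIPPool{CIDR: "192.168.10.5/24"}
cidr, err := pool.GetCIDR()
if err != nil {
	log.Fatal(err) // invalid or empty pool spec
}
fmt.Println(cidr) // 192.168.10.5/24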
mod.rs
//! Within this module, types commonly use the following abbreviations: //! //! F: From Clause //! S: Select Clause //! D: Distinct Clause //! W: Where Clause //! O: Order By Clause //! L: Limit Clause //! Of: Offset Clause //! G: Group By Clause //! LC: For Update Clause #![allow(missing_docs)] // The missing_docs lint triggers even though this is hidden mod boxed; mod dsl_impls; pub use self::boxed::BoxedSelectStatement; use super::distinct_clause::NoDistinctClause; use super::group_by_clause::NoGroupByClause; use super::limit_clause::NoLimitClause; use super::locking_clause::NoLockingClause; use super::offset_clause::NoOffsetClause; use super::order_clause::NoOrderClause; use super::select_clause::*; use super::where_clause::*; use super::{AstPass, Query, QueryFragment}; use backend::Backend; use expression::subselect::ValidSubselect; use expression::*; use query_builder::SelectQuery; use query_source::joins::{AppendSelection, Inner, Join}; use query_source::*; use result::QueryResult; #[derive(Debug, Clone, Copy, QueryId)] #[doc(hidden)] #[must_use = "Queries are only executed when calling `load`, `get_result` or similar."] pub struct SelectStatement< From, Select = DefaultSelectClause, Distinct = NoDistinctClause, Where = NoWhereClause, Order = NoOrderClause, Limit = NoLimitClause, Offset = NoOffsetClause, GroupBy = NoGroupByClause, Locking = NoLockingClause, > { pub(crate) select: Select, pub(crate) from: From, pub(crate) distinct: Distinct, pub(crate) where_clause: Where, pub(crate) order: Order, pub(crate) limit: Limit, pub(crate) offset: Offset, pub(crate) group_by: GroupBy, pub(crate) locking: Locking, } impl<F, S, D, W, O, L, Of, G, LC> SelectStatement<F, S, D, W, O, L, Of, G, LC> { #[allow(clippy::too_many_arguments)] pub fn new( select: S, from: F, distinct: D, where_clause: W, order: O, limit: L, offset: Of, group_by: G, locking: LC, ) -> Self { SelectStatement { select: select, from: from, distinct: distinct, where_clause: where_clause, order: order, limit: limit, offset: offset, group_by: group_by, locking: locking, } } } impl<F> SelectStatement<F> { pub fn simple(from: F) -> Self { SelectStatement::new( DefaultSelectClause, from, NoDistinctClause, NoWhereClause, NoOrderClause, NoLimitClause, NoOffsetClause, NoGroupByClause, NoLockingClause, ) } } impl<F, S, D, W, O, L, Of, G, LC> Query for SelectStatement<F, S, D, W, O, L, Of, G, LC> where S: SelectClauseExpression<F>, W: ValidWhereClause<F>, { type SqlType = S::SelectClauseSqlType; } impl<F, S, D, W, O, L, Of, G, LC> SelectQuery for SelectStatement<F, S, D, W, O, L, Of, G, LC> where S: SelectClauseExpression<F>, { type SqlType = S::SelectClauseSqlType; } impl<F, S, D, W, O, L, Of, G, LC, DB> QueryFragment<DB> for SelectStatement<F, S, D, W, O, L, Of, G, LC> where DB: Backend, S: SelectClauseQueryFragment<F, DB>, F: QuerySource, F::FromClause: QueryFragment<DB>, D: QueryFragment<DB>, W: QueryFragment<DB>, O: QueryFragment<DB>, L: QueryFragment<DB>, Of: QueryFragment<DB>, G: QueryFragment<DB>, LC: QueryFragment<DB>, { fn walk_ast(&self, mut out: AstPass<DB>) -> QueryResult<()> { out.push_sql("SELECT "); self.distinct.walk_ast(out.reborrow())?; self.select.walk_ast(&self.from, out.reborrow())?; out.push_sql(" FROM "); self.from.from_clause().walk_ast(out.reborrow())?; self.where_clause.walk_ast(out.reborrow())?; self.group_by.walk_ast(out.reborrow())?; self.order.walk_ast(out.reborrow())?; self.limit.walk_ast(out.reborrow())?; self.offset.walk_ast(out.reborrow())?; self.locking.walk_ast(out.reborrow())?; Ok(()) } } impl<S, 
D, W, O, L, Of, G, LC, DB> QueryFragment<DB> for SelectStatement<(), S, D, W, O, L, Of, G, LC> where DB: Backend, S: SelectClauseQueryFragment<(), DB>, D: QueryFragment<DB>, W: QueryFragment<DB>, O: QueryFragment<DB>, L: QueryFragment<DB>, Of: QueryFragment<DB>, G: QueryFragment<DB>, LC: QueryFragment<DB>, { fn
(&self, mut out: AstPass<DB>) -> QueryResult<()> { out.push_sql("SELECT "); self.distinct.walk_ast(out.reborrow())?; self.select.walk_ast(&(), out.reborrow())?; self.where_clause.walk_ast(out.reborrow())?; self.group_by.walk_ast(out.reborrow())?; self.order.walk_ast(out.reborrow())?; self.limit.walk_ast(out.reborrow())?; self.offset.walk_ast(out.reborrow())?; self.locking.walk_ast(out.reborrow())?; Ok(()) } } impl<S, F, D, W, O, L, Of, G, LC, QS> ValidSubselect<QS> for SelectStatement<F, S, D, W, O, L, Of, G, LC> where Self: SelectQuery, W: ValidWhereClause<Join<F, QS, Inner>>, { } /// Allow `SelectStatement<From>` to act as if it were `From` as long as /// no other query methods have been called on it impl<From, T> AppearsInFromClause<T> for SelectStatement<From> where From: AppearsInFromClause<T>, { type Count = From::Count; } impl<From> QuerySource for SelectStatement<From> where From: QuerySource, From::DefaultSelection: SelectableExpression<Self>, { type FromClause = From::FromClause; type DefaultSelection = From::DefaultSelection; fn from_clause(&self) -> Self::FromClause { self.from.from_clause() } fn default_selection(&self) -> Self::DefaultSelection { self.from.default_selection() } } impl<From, Selection> AppendSelection<Selection> for SelectStatement<From> where From: AppendSelection<Selection>, { type Output = From::Output; fn append_selection(&self, selection: Selection) -> Self::Output { self.from.append_selection(selection) } }
walk_ast
lib.rs
extern crate proc_macro; extern crate proc_macro2; #[macro_use] extern crate syn; #[macro_use] extern crate quote;
mod methods; mod native_script; mod variant; #[proc_macro_attribute] pub fn methods(meta: TokenStream, input: TokenStream) -> TokenStream { methods::derive_methods(meta, input) } #[proc_macro_derive( NativeClass, attributes(inherit, export, opt, user_data, property, register_with) )] pub fn derive_native_class(input: TokenStream) -> TokenStream { native_script::derive_native_class(input) } #[proc_macro_derive(ToVariant, attributes(variant))] pub fn derive_to_variant(input: TokenStream) -> TokenStream { variant::derive_to_variant(input) } #[proc_macro_derive(FromVariant, attributes(variant))] pub fn derive_from_variant(input: TokenStream) -> TokenStream { variant::derive_from_variant(input) }
use proc_macro::TokenStream;
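A downstream usage sketch of these derives and attributes, following common gdnative conventions; it belongs to a consuming crate, not this proc-macro crate, and the exact API surface is an assumption.

#[derive(NativeClass)]
#[inherit(Node)]
struct HelloWorld;

#[methods]
impl HelloWorld {
    fn new(_owner: &Node) -> Self {
        HelloWorld
    }

    #[export]
    fn _ready(&self, _owner: &Node) {
        godot_print!("Hello, world!");
    }
}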
0002_listing.py
# Generated by Django 3.0.6 on 2020-10-02 12:04 from django.db import migrations, models class
(migrations.Migration): dependencies = [ ('auctions', '0001_initial'), ] operations = [ migrations.CreateModel( name='Listing', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=128)), ('description', models.TextField()), ('starting_bid', models.DecimalField(decimal_places=2, max_digits=15)), ('image_url', models.URLField(blank=True)), ('category', models.CharField(max_length=64)), ], ), ]
Migration
context.rs
use error::{Result, Error}; use filters::Filter; use std::collections::HashMap; use token::Token::{self, Identifier, StringLiteral, NumberLiteral, BooleanLiteral}; use value::Value; #[derive(Clone)] pub enum Interrupt { Continue, Break, } type ValueMap = HashMap<String, Value>; #[derive(Default)] pub struct Context { stack: Vec<ValueMap>, globals: ValueMap, /// The current interrupt state. The interrupt state is used by /// the `break` and `continue` tags to halt template rendering /// at a given point and unwind the `render` call stack until /// it reaches an enclosing `for_loop`. At that point the interrupt /// is cleared, and the `for_loop` carries on processing as directed. interrupt: Option<Interrupt>, /// The indices of all the cycles encountered during rendering. cycles: HashMap<String, usize>, // Public for backwards compatibility pub filters: HashMap<String, Box<Filter>>, } impl Context { /// Creates a new, empty rendering context. /// /// # Examples /// /// ``` /// # use liquid::Context; /// let ctx = Context::new(); /// assert_eq!(ctx.get_val("test"), None); /// ``` pub fn new() -> Context { Context::with_values_and_filters(HashMap::new(), HashMap::new()) } pub fn with_values(values: HashMap<String, Value>) -> Context { Context::with_values_and_filters(values, HashMap::new()) } pub fn with_filters(filters: HashMap<String, Box<Filter>>) -> Context { Context::with_values_and_filters(HashMap::new(), filters) } pub fn with_values_and_filters(values: HashMap<String, Value>, filters: HashMap<String, Box<Filter>>) -> Context { Context { stack: vec![HashMap::new()], interrupt: None, cycles: HashMap::new(), globals: values, filters: filters, } } pub fn cycle_element(&mut self, name: &str, values: &[Token]) -> Result<Option<Value>> { let index = { let i = self.cycles.entry(name.to_owned()).or_insert(0); let j = *i; *i = (*i + 1) % values.len(); j }; if index >= values.len() { return Err(Error::Render(format!("cycle index {} out of bounds {}", index, values.len()))); } self.evaluate(&values[index]) } pub fn add_filter(&mut self, name: &str, filter: Box<Filter>) { self.filters.insert(name.to_owned(), filter); } pub fn get_filter<'b>(&'b self, name: &str) -> Option<&'b Box<Filter>> { self.filters.get(name) } pub fn interrupted(&self) -> bool { self.interrupt.is_some() } /// Sets the interrupt state. Any previous state is obliterated. pub fn set_interrupt(&mut self, interrupt: Interrupt) { self.interrupt = Some(interrupt); } /// Fetches and clears the interrupt state. pub fn pop_interrupt(&mut self) -> Option<Interrupt> { let rval = self.interrupt.clone(); self.interrupt = None; rval } /// Creates a new variable scope chained to a parent scope. fn push_scope(&mut self) { self.stack.push(HashMap::new()); } /// Removes the topmost stack frame from the local variable stack. /// /// # Panics /// /// This method will panic if there is no frame left to pop. Given that a /// context is created with a top-level stack frame already in place, /// emptying the stack should never happen in a well-formed program. fn pop_scope(&mut self)
/// Sets up a new stack frame, executes the supplied function and then /// tears the stack frame down before returning the function's result /// to the caller. /// /// # Examples /// ``` /// # use liquid::{Value, Context}; /// let mut ctx = Context::new(); /// ctx.set_val("test", Value::Num(42f32)); /// ctx.run_in_scope(|mut stack_frame| { /// // stack_frame inherits values from its parent context /// assert_eq!(stack_frame.get_val("test"), Some(&Value::Num(42f32))); /// /// // but can (optionally) override them /// stack_frame.set_local_val("test", Value::Num(3.14f32)); /// assert_eq!(stack_frame.get_val("test"), Some(&Value::Num(3.14f32))); /// }); /// // the original value is unchanged once the scope exits /// assert_eq!(ctx.get_val("test"), Some(&Value::Num(42f32))); /// ``` pub fn run_in_scope<RvalT, FnT>(&mut self, f: FnT) -> RvalT where FnT: FnOnce(&mut Context) -> RvalT { self.push_scope(); let result = f(self); self.pop_scope(); result } /// Internal part of get_val. Walks the scope stack to try and find the /// requested variable, and failing that checks the global pool. fn get<'a>(&'a self, name: &str) -> Option<&'a Value> { for frame in self.stack.iter().rev() { if let rval @ Some(_) = frame.get(name) { return rval; } } self.globals.get(name) } /// Gets a value from the rendering context. The name value can be a /// dot-separated path to a value. A value will only be returned if /// each link in the chain (excluding the final name) refers to a /// value of type Object. /// /// # Examples /// /// ``` /// # use liquid::{Value, Context}; /// let mut ctx = Context::new(); /// ctx.set_val("test", Value::Num(42f32)); /// assert_eq!(ctx.get_val("test").unwrap(), &Value::Num(42f32)); /// ``` pub fn get_val<'b>(&'b self, name: &str) -> Option<&'b Value> { let mut path = name.split('.'); let key = path.next().unwrap_or(""); let mut rval = self.get(key); // walk the chain of Object values, as specified by the path // passed in name for id in path { match rval { Some(&Value::Object(ref x)) => rval = x.get(id), _ => return None, } } rval } /// Sets a value in the global context. /// /// # Examples /// /// ``` /// # use liquid::{Value, Context}; /// let mut ctx = Context::new(); /// ctx.set_val("test", Value::Num(42f32)); /// assert_eq!(ctx.get_val("test"), Some(&Value::Num(42f32))); /// ``` pub fn set_val(&mut self, name: &str, val: Value) -> Option<Value> { self.globals.insert(name.to_owned(), val) } /// Translates a Token to a Value, looking it up in the context if /// necessary pub fn evaluate(&self, t: &Token) -> Result<Option<Value>> { match t { &NumberLiteral(f) => Ok(Some(Value::Num(f))), &StringLiteral(ref s) => Ok(Some(Value::Str(s.clone()))), &BooleanLiteral(b) => Ok(Some(Value::Bool(b))), &Identifier(ref id) => Ok(self.get_val(id).cloned()), _ => { let msg = format!("Cannot evaluate {}", t); Err(Error::Other(msg)) } } } /// Sets a value in the rendering context. /// Note that it needs to be wrapped in a liquid::Value. /// /// # Panics /// /// Panics if there is no frame on the local values stack. Context /// instances are created with a top-level stack frame in place, so /// this should never happen in a well-formed program.
/// /// # Examples /// /// ``` /// # use liquid::{Value, Context}; /// let mut ctx = Context::new(); /// ctx.run_in_scope(|mut local_scope| { /// local_scope.set_val("global", Value::Num(42f32)); /// local_scope.set_local_val("local", Value::Num(163f32)); /// /// assert_eq!(local_scope.get_val("global"), Some(&Value::Num(42f32))); /// assert_eq!(local_scope.get_val("local"), Some(&Value::Num(163f32))); /// }); /// assert_eq!(ctx.get_val("global"), Some(&Value::Num(42f32))); /// assert_eq!(ctx.get_val("local"), None); /// ``` pub fn set_local_val(&mut self, name: &str, val: Value) -> Option<Value> { match self.stack.last_mut() { Some(frame) => frame.insert(name.to_owned(), val), None => panic!("Cannot insert into an empty stack"), } } } #[cfg(test)] mod test { use super::Context; use value::Value; use std::collections::HashMap; #[test] fn get_val() { let mut ctx = Context::new(); let mut post = HashMap::new(); post.insert("number".to_owned(), Value::Num(42f32)); ctx.set_val("post", Value::Object(post)); assert_eq!(ctx.get_val("post.number").unwrap(), &Value::Num(42f32)); } #[test] fn scoped_variables() { let mut ctx = Context::new(); ctx.set_val("test", Value::Num(42f32)); assert_eq!(ctx.get_val("test").unwrap(), &Value::Num(42f32)); ctx.run_in_scope(|mut new_scope| { // assert that values are chained to the parent scope assert_eq!(new_scope.get_val("test").unwrap(), &Value::Num(42f32)); // set a new local value, and assert that it overrides the previous value new_scope.set_local_val("test", Value::Num(3.14f32)); assert_eq!(new_scope.get_val("test").unwrap(), &Value::Num(3.14f32)); // set a new val that we will pick up outside the scope new_scope.set_val("global", Value::str("some value")); }); // assert that the value has reverted to the old one assert_eq!(ctx.get_val("test").unwrap(), &Value::Num(42f32)); assert_eq!(ctx.get_val("global").unwrap(), &Value::str("some value")); } #[test] fn evaluate_handles_string_literals() { use token::Token::StringLiteral; let ctx = Context::new(); let t = StringLiteral("hello".to_owned()); assert_eq!(ctx.evaluate(&t).unwrap(), Some(Value::str("hello"))); } #[test] fn evaluate_handles_number_literals() { use token::Token::NumberLiteral; let ctx = Context::new(); assert_eq!(ctx.evaluate(&NumberLiteral(42f32)).unwrap(), Some(Value::Num(42f32))); } #[test] fn evaluate_handles_boolean_literals() { use token::Token::BooleanLiteral; let ctx = Context::new(); assert_eq!(ctx.evaluate(&BooleanLiteral(true)).unwrap(), Some(Value::Bool(true))); assert_eq!(ctx.evaluate(&BooleanLiteral(false)).unwrap(), Some(Value::Bool(false))); } #[test] fn evaluate_handles_identifiers() { use token::Token::Identifier; let mut ctx = Context::new(); ctx.set_val("var0", Value::Num(42f32)); assert_eq!(ctx.evaluate(&Identifier("var0".to_owned())).unwrap(), Some(Value::Num(42f32))); assert_eq!(ctx.evaluate(&Identifier("nope".to_owned())).unwrap(), None); } #[test] fn evaluate_returns_none_on_invalid_token() { use token::Token::DotDot; let ctx = Context::new(); assert!(ctx.evaluate(&DotDot).is_err()); } }
{
    if self.stack.pop().is_none() {
        panic!("Pop leaves empty stack")
    }
}
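The Context above resolves a variable by walking its stack of scope frames from innermost to outermost and then falling back to the global pool, while get_val additionally follows dot-separated paths through nested Object values. Below is a minimal Python sketch of that lookup strategy; the ScopedContext class is hypothetical and only illustrates the algorithm, it is not part of liquid-rust.

    # Hypothetical illustration of liquid's scope-chain lookup (not the liquid-rust API).
    class ScopedContext:
        def __init__(self):
            self.globals = {}
            self.stack = [{}]  # created with one top-level frame, like Context::new()

        def get(self, name):
            # Search frames newest-to-oldest, then fall back to the globals.
            for frame in reversed(self.stack):
                if name in frame:
                    return frame[name]
            return self.globals.get(name)

        def get_val(self, path):
            # A dot-separated path walks nested dicts, mirroring how
            # get_val walks chained Value::Object entries.
            parts = path.split(".")
            value = self.get(parts[0])
            for key in parts[1:]:
                if not isinstance(value, dict):
                    return None
                value = value.get(key)
            return value

    ctx = ScopedContext()
    ctx.globals["post"] = {"number": 42}
    assert ctx.get_val("post.number") == 42
    ctx.stack.append({"post": {"number": 3.14}})  # push_scope + set_local_val
    assert ctx.get_val("post.number") == 3.14
    ctx.stack.pop()  # pop_scope restores the outer value
    assert ctx.get_val("post.number") == 42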
extension.ts
import * as path from 'path' import * as vscode from 'vscode' import {promises as fs, constants as fsconstants} from 'fs' import {parse} from 'groq-js' import {Config, loadConfig} from './config/findConfig' import {GroqContentProvider} from './providers/content-provider' import {GROQCodeLensProvider} from './providers/groq-codelens-provider' import {executeGroq} from './query' export function activate(context: vscode.ExtensionContext) { // Assigned by `readConfig()` let codelens: vscode.Disposable | undefined let useCodelens let openJSONFile let useCDN // Read and listen for configuration updates readConfig() vscode.workspace.onDidChangeConfiguration(() => readConfig()) let resultPanel: vscode.WebviewPanel | undefined let disposable = vscode.commands.registerCommand('sanity.executeGroq', async (groqQuery) => { let config: Config let query: string = groqQuery let params = {} try { config = await loadSanityJson() if (!query) { query = await loadGroqFromFile() } const variables = findVariablesInQuery(query) if (variables.length > 0) { params = await readParamsFile() } // FIXME: Throw error object in webview? const {ms, result} = await executeGroq({ ...config, query, params, useCdn: config.token ? false : useCDN, }) vscode.window.setStatusBarMessage( `Query took ${ms}ms` + (useCDN ? ' with cdn' : ' without cdn'), 10000 ) if (!openJSONFile && !resultPanel) { resultPanel = vscode.window.createWebviewPanel( 'executionResultsWebView',
        resultPanel.onDidDispose(() => {
          resultPanel = undefined
        })
      }

      if (openJSONFile) {
        await openInUntitled(result, 'json')
      } else if (resultPanel) {
        const contentProvider = await registerContentProvider(context, result || [])
        const html = await contentProvider.getCurrentHTML()
        resultPanel.webview.html = html
      }
    } catch (err) {
      vscode.window.showErrorMessage(err.message)
      return
    }
  })

  context.subscriptions.push(disposable)

  function readConfig() {
    const settings = vscode.workspace.getConfiguration('sanity')
    openJSONFile = settings.get('openJSONFile', false)
    useCodelens = settings.get('useCodelens', true)
    useCDN = settings.get('useCDN', false)

    if (useCodelens && !codelens) {
      codelens = vscode.languages.registerCodeLensProvider(
        ['javascript', 'typescript', 'javascriptreact', 'typescriptreact', 'groq'],
        new GROQCodeLensProvider()
      )

      context.subscriptions.push(codelens)
    } else if (!useCodelens && codelens) {
      const subIndex = context.subscriptions.indexOf(codelens)
      context.subscriptions.splice(subIndex, 1)
      codelens.dispose()
      codelens = undefined
    }
  }
}

async function loadSanityJson() {
  const config = (await loadConfig(getRootPath())) || (await loadConfig(getWorkspacePath()))
  if (!config) {
    throw new Error('Could not resolve sanity.json configuration file')
  }
  return config
}

async function loadGroqFromFile() {
  const activeTextEditor = vscode.window.activeTextEditor
  if (!activeTextEditor) {
    throw new Error('Nothing to execute')
  }

  return activeTextEditor.document.getText()
}

async function registerContentProvider(
  context: vscode.ExtensionContext,
  result: any
): Promise<any> {
  const contentProvider = new GroqContentProvider(result)
  const registration = vscode.workspace.registerTextDocumentContentProvider('groq', contentProvider)
  context.subscriptions.push(registration)
  return contentProvider
}

function getRootPath(): string {
  const activeFile = getActiveFileName()
  const activeDir = path.dirname(activeFile)
  return activeDir
}

function getWorkspacePath(): string {
  const folders = vscode.workspace.workspaceFolders || []
  return folders.length > 0 ? folders[0].uri.fsPath : ''
}

function getActiveFileName(): string {
  return vscode.window.activeTextEditor?.document.fileName || ''
}

async function checkFileExists(file: string) {
  return fs
    .access(file, fsconstants.F_OK)
    .then(() => true)
    .catch(() => false)
}

function findVariablesInQuery(query: string): string[] {
  return findVariables(parse(query), [])
}

function findVariables(node: any, found: string[]): string[] {
  if (node && node.type === 'Parameter' && typeof node.name === 'string') {
    return found.concat(node.name)
  }

  if (Array.isArray(node)) {
    return node.reduce((acc, child) => findVariables(child, acc), found)
  }

  // `typeof null` is 'object', so guard against null explicitly before
  // recursing into the node's keys
  if (node === null || typeof node !== 'object') {
    return found
  }

  return Object.keys(node).reduce((acc, key) => findVariables(node[key], acc), found)
}

async function readParamsFile(): Promise<Record<string, unknown>> {
  let defaultParamFile: string | undefined
  let absoluteParamFile: string | undefined
  const activeFile = getActiveFileName()
  if (activeFile && activeFile !== '') {
    const pos = activeFile.lastIndexOf('.')
    absoluteParamFile = activeFile.slice(0, pos < 0 ? activeFile.length : pos) + '.json'
    if (await checkFileExists(absoluteParamFile)) {
      defaultParamFile = path.basename(absoluteParamFile)
    }
  }

  const paramsFile = await vscode.window.showInputBox({value: defaultParamFile})
  if (!paramsFile) {
    throw new Error('Invalid param file received')
  }

  // Fall back to the workspace root when there is no active file to
  // resolve the params file against
  const baseDir = absoluteParamFile ? path.dirname(absoluteParamFile) : getWorkspacePath()
  const content = await fs.readFile(path.join(baseDir, paramsFile), 'utf8')
  return JSON.parse(content)
}

async function openInUntitled(content: string, language?: string) {
  const cs = JSON.stringify(content)
  await vscode.workspace.openTextDocument({content: cs}).then((document) => {
    vscode.window.showTextDocument(document, {viewColumn: vscode.ViewColumn.Beside})
    vscode.languages.setTextDocumentLanguage(document, language || 'json')
  })
}
'GROQ Execution Result', vscode.ViewColumn.Beside, {} )
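The findVariables helper above gathers GROQ parameter names by recursively walking the syntax tree returned by groq-js's parse and collecting every node whose type is 'Parameter'. A rough Python sketch of the same traversal, assuming the AST is a tree of plain dicts and lists (find_variables here is a hypothetical helper, not part of the extension):

    # Hypothetical re-implementation of the recursive Parameter search.
    def find_variables(node, found=None):
        found = [] if found is None else found
        if isinstance(node, dict):
            if node.get("type") == "Parameter" and isinstance(node.get("name"), str):
                found.append(node["name"])
            for child in node.values():
                find_variables(child, found)
        elif isinstance(node, list):
            for child in node:
                find_variables(child, found)
        return found

    ast = {
        "type": "OpCall",
        "left": {"type": "Parameter", "name": "slug"},
        "right": {"type": "Value", "value": "home"},
    }
    assert find_variables(ast) == ["slug"]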
task_def.rs
// Copyright 2018 Netflix, Inc. // Copyright 2019 Nikita Pekin // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use conductor::TaskDef; fn main()
{ let task_def = TaskDef::new("eat_spam".to_string()); println!("Task definition: {:#?}", task_def); }
test_bitwise.py
"""位元運算子 @詳見:https://www.w3schools.com/python/python_operators.asp 我們可以透過位元運算子在位元層級執行數學運算 """ def test_bitwise_operators(): """位元運算子""" # 及閘(AND Gate
    #
    # Example:
    # 5 = 0b0101
    # 3 = 0b0011
    assert 5 & 3 == 1  # 0b0001

    # OR gate
    # The output is 1 when either input is 1
    #
    # Example:
    # 5 = 0b0101
    # 3 = 0b0011
    assert 5 | 3 == 7  # 0b0111

    # NOT gate
    # Inverts the input (two's complement; the decimal result is -x - 1)
    # ~5 = ~0101
    #    = -(0101 + 1)
    #    = -(0110)
    #    = -6 (decimal)
    assert ~5 == -6

    # XOR gate
    # The output is 0 when the inputs are the same and 1 when they differ
    #
    # Example:
    # 5 = 0b0101
    # 3 = 0b0011
    number = 5  # 0b0101
    number ^= 3  # 0b0011
    assert number == 6  # 0b0110

    # Right shift operator
    # Shifts the bits of the input to the right by the given number of
    # positions (dividing by powers of two)
    #
    # Example:
    # 5 = 0b0101
    assert 5 >> 1 == 2  # 0b0010
    assert 5 >> 2 == 1  # 0b0001

    # Left shift operator
    # Shifts the bits of the input to the left by the given number of
    # positions (multiplying by powers of two)
    #
    # Example:
    # 5 = 0b0101
    assert 5 << 1 == 10  # 0b1010
    assert 5 << 2 == 20  # 0b10100
    # The output is 1 only when both inputs are 1
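As a short follow-up, the same operators are what make bit flags work; the permission constants below are hypothetical and not part of the test file above:

    # Hypothetical permission bits packed into one integer.
    READ, WRITE, EXEC = 0b100, 0b010, 0b001

    perms = READ | WRITE           # set two flags -> 0b110
    assert perms & READ == READ    # test a flag with AND
    perms &= ~WRITE                # clear a flag with AND + NOT -> 0b100
    assert perms == READ
    assert perms ^ EXEC == 0b101   # toggle a flag with XOR
    assert 1 << 3 == 8             # shifting is a cheap power of two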
job.py
# Copyright 2015 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Define API Jobs.""" import copy import threading from six.moves import http_client import google.api_core.future.polling from google.cloud import exceptions from google.cloud.exceptions import NotFound from google.cloud.bigquery.dataset import DatasetReference from google.cloud.bigquery.external_config import ExternalConfig from google.cloud.bigquery.query import _query_param_from_api_repr from google.cloud.bigquery.query import ArrayQueryParameter from google.cloud.bigquery.query import ScalarQueryParameter from google.cloud.bigquery.query import StructQueryParameter from google.cloud.bigquery.query import UDFResource from google.cloud.bigquery.retry import DEFAULT_RETRY from google.cloud.bigquery.schema import SchemaField from google.cloud.bigquery.table import _EmptyRowIterator from google.cloud.bigquery.table import EncryptionConfiguration from google.cloud.bigquery.table import TableReference from google.cloud.bigquery.table import Table from google.cloud.bigquery.table import TimePartitioning from google.cloud.bigquery import _helpers _DONE_STATE = "DONE" _STOPPED_REASON = "stopped" _TIMEOUT_BUFFER_SECS = 0.1 _ERROR_REASON_TO_EXCEPTION = { "accessDenied": http_client.FORBIDDEN, "backendError": http_client.INTERNAL_SERVER_ERROR, "billingNotEnabled": http_client.FORBIDDEN, "billingTierLimitExceeded": http_client.BAD_REQUEST, "blocked": http_client.FORBIDDEN, "duplicate": http_client.CONFLICT, "internalError": http_client.INTERNAL_SERVER_ERROR, "invalid": http_client.BAD_REQUEST, "invalidQuery": http_client.BAD_REQUEST, "notFound": http_client.NOT_FOUND, "notImplemented": http_client.NOT_IMPLEMENTED, "quotaExceeded": http_client.FORBIDDEN, "rateLimitExceeded": http_client.FORBIDDEN, "resourceInUse": http_client.BAD_REQUEST, "resourcesExceeded": http_client.BAD_REQUEST, "responseTooLarge": http_client.FORBIDDEN, "stopped": http_client.OK, "tableUnavailable": http_client.BAD_REQUEST, } def _error_result_to_exception(error_result): """Maps BigQuery error reasons to an exception. The reasons and their matching HTTP status codes are documented on the `troubleshooting errors`_ page. .. _troubleshooting errors: https://cloud.google.com/bigquery\ /troubleshooting-errors :type error_result: Mapping[str, str] :param error_result: The error result from BigQuery. :rtype google.cloud.exceptions.GoogleCloudError: :returns: The mapped exception. """ reason = error_result.get("reason") status_code = _ERROR_REASON_TO_EXCEPTION.get( reason, http_client.INTERNAL_SERVER_ERROR ) return exceptions.from_http_status( status_code, error_result.get("message", ""), errors=[error_result] ) class Compression(object): """The compression type to use for exported files. The default value is :attr:`NONE`. :attr:`DEFLATE` and :attr:`SNAPPY` are only supported for Avro. 
""" GZIP = "GZIP" """Specifies GZIP format.""" DEFLATE = "DEFLATE" """Specifies DEFLATE format.""" SNAPPY = "SNAPPY" """Specifies SNAPPY format.""" NONE = "NONE" """Specifies no compression.""" class CreateDisposition(object): """Specifies whether the job is allowed to create new tables. The default value is :attr:`CREATE_IF_NEEDED`. Creation, truncation and append actions occur as one atomic update upon job completion. """ CREATE_IF_NEEDED = "CREATE_IF_NEEDED" """If the table does not exist, BigQuery creates the table.""" CREATE_NEVER = "CREATE_NEVER" """The table must already exist. If it does not, a 'notFound' error is returned in the job result.""" class DestinationFormat(object): """The exported file format. The default value is :attr:`CSV`. Tables with nested or repeated fields cannot be exported as CSV. """ CSV = "CSV" """Specifies CSV format.""" NEWLINE_DELIMITED_JSON = "NEWLINE_DELIMITED_JSON" """Specifies newline delimited JSON format.""" AVRO = "AVRO" """Specifies Avro format.""" class Encoding(object): """The character encoding of the data. The default is :attr:`UTF_8`. BigQuery decodes the data after the raw, binary data has been split using the values of the quote and fieldDelimiter properties. """ UTF_8 = "UTF-8" """Specifies UTF-8 encoding.""" ISO_8859_1 = "ISO-8859-1" """Specifies ISO-8859-1 encoding.""" class QueryPriority(object): """Specifies a priority for the query. The default value is :attr:`INTERACTIVE`. """ INTERACTIVE = "INTERACTIVE" """Specifies interactive priority.""" BATCH = "BATCH" """Specifies batch priority.""" class SourceFormat(object): """The format of the data files. The default value is :attr:`CSV`. Note that the set of allowed values for loading data is different than the set used for external data sources (see :class:`~google.cloud.bigquery.external_config.ExternalSourceFormat`). """ CSV = "CSV" """Specifies CSV format.""" DATASTORE_BACKUP = "DATASTORE_BACKUP" """Specifies datastore backup format""" NEWLINE_DELIMITED_JSON = "NEWLINE_DELIMITED_JSON" """Specifies newline delimited JSON format.""" AVRO = "AVRO" """Specifies Avro format.""" PARQUET = "PARQUET" """Specifies Parquet format.""" ORC = "ORC" """Specifies Orc format.""" class WriteDisposition(object): """Specifies the action that occurs if destination table already exists. The default value is :attr:`WRITE_APPEND`. Each action is atomic and only occurs if BigQuery is able to complete the job successfully. Creation, truncation and append actions occur as one atomic update upon job completion. """ WRITE_APPEND = "WRITE_APPEND" """If the table already exists, BigQuery appends the data to the table.""" WRITE_TRUNCATE = "WRITE_TRUNCATE" """If the table already exists, BigQuery overwrites the table data.""" WRITE_EMPTY = "WRITE_EMPTY" """If the table already exists and contains data, a 'duplicate' error is returned in the job result.""" class SchemaUpdateOption(object): """Specifies an update to the destination table schema as a side effect of a load job. """ ALLOW_FIELD_ADDITION = "ALLOW_FIELD_ADDITION" """Allow adding a nullable field to the schema.""" ALLOW_FIELD_RELAXATION = "ALLOW_FIELD_RELAXATION" """Allow relaxing a required field in the original schema to nullable.""" class _JobReference(object): """A reference to a job. Arguments: job_id (str): ID of the job to run. project (str): ID of the project where the job runs. location (str): Location of where the job runs. 
""" def __init__(self, job_id, project, location): self._properties = {"jobId": job_id, "projectId": project} # The location field must not be populated if it is None. if location: self._properties["location"] = location @property def job_id(self): """str: ID of the job.""" return self._properties.get("jobId") @property def project(self): """str: ID of the project where the job runs.""" return self._properties.get("projectId") @property def location(self): """str: Location where the job runs.""" return self._properties.get("location") def _to_api_repr(self): """Returns the API resource representation of the job reference.""" return copy.deepcopy(self._properties) @classmethod def _from_api_repr(cls, resource): """Returns a job reference for an API resource representation.""" job_id = resource.get("jobId") project = resource.get("projectId") location = resource.get("location") job_ref = cls(job_id, project, location) return job_ref class _AsyncJob(google.api_core.future.polling.PollingFuture): """Base class for asynchronous jobs. Arguments: job_id (Union[str, _JobReference]): Job's ID in the project associated with the client or a fully-qualified job reference. client (google.cloud.bigquery.client.Client): Client which holds credentials and project configuration. """ def __init__(self, job_id, client): super(_AsyncJob, self).__init__() # The job reference can be either a plain job ID or the full resource. # Populate the properties dictionary consistently depending on what has # been passed in. job_ref = job_id if not isinstance(job_id, _JobReference): job_ref = _JobReference(job_id, client.project, None) self._properties = {"jobReference": job_ref._to_api_repr()} self._client = client self._result_set = False self._completion_lock = threading.Lock() @property def job_id(self): """str: ID of the job.""" return _helpers._get_sub_prop(self._properties, ["jobReference", "jobId"]) @property def project(self): """Project bound to the job. :rtype: str :returns: the project (derived from the client). """ return _helpers._get_sub_prop(self._properties, ["jobReference", "projectId"]) @property def location(self): """str: Location where the job runs.""" return _helpers._get_sub_prop(self._properties, ["jobReference", "location"]) def _require_client(self, client): """Check client or verify over-ride. :type client: :class:`~google.cloud.bigquery.client.Client` or ``NoneType`` :param client: the client to use. If not passed, falls back to the ``client`` stored on the current dataset. :rtype: :class:`google.cloud.bigquery.client.Client` :returns: The client passed in or the currently bound client. """ if client is None: client = self._client return client @property def job_type(self): """Type of job :rtype: str :returns: one of 'load', 'copy', 'extract', 'query' """ return self._JOB_TYPE @property def path(self): """URL path for the job's APIs. :rtype: str :returns: the path based on project and job ID. """ return "/projects/%s/jobs/%s" % (self.project, self.job_id) @property def labels(self): """Dict[str, str]: Labels for the job.""" return self._properties.setdefault("labels", {}) @property def etag(self): """ETag for the job resource. :rtype: str, or ``NoneType`` :returns: the ETag (None until set from the server). """ return self._properties.get("etag") @property def self_link(self): """URL for the job resource. :rtype: str, or ``NoneType`` :returns: the URL (None until set from the server). 
""" return self._properties.get("selfLink") @property def user_email(self): """E-mail address of user who submitted the job. :rtype: str, or ``NoneType`` :returns: the URL (None until set from the server). """ return self._properties.get("user_email") @property def created(self): """Datetime at which the job was created. :rtype: ``datetime.datetime``, or ``NoneType`` :returns: the creation time (None until set from the server). """ statistics = self._properties.get("statistics") if statistics is not None: millis = statistics.get("creationTime") if millis is not None: return _helpers._datetime_from_microseconds(millis * 1000.0) @property def started(self): """Datetime at which the job was started. :rtype: ``datetime.datetime``, or ``NoneType`` :returns: the start time (None until set from the server). """ statistics = self._properties.get("statistics") if statistics is not None: millis = statistics.get("startTime") if millis is not None: return _helpers._datetime_from_microseconds(millis * 1000.0) @property def ended(self): """Datetime at which the job finished. :rtype: ``datetime.datetime``, or ``NoneType`` :returns: the end time (None until set from the server). """ statistics = self._properties.get("statistics") if statistics is not None: millis = statistics.get("endTime") if millis is not None: return _helpers._datetime_from_microseconds(millis * 1000.0) def _job_statistics(self): """Helper for job-type specific statistics-based properties.""" statistics = self._properties.get("statistics", {}) return statistics.get(self._JOB_TYPE, {}) @property def error_result(self): """Error information about the job as a whole. :rtype: mapping, or ``NoneType`` :returns: the error information (None until set from the server). """ status = self._properties.get("status") if status is not None: return status.get("errorResult") @property def errors(self): """Information about individual errors generated by the job. :rtype: list of mappings, or ``NoneType`` :returns: the error information (None until set from the server). """ status = self._properties.get("status") if status is not None: return status.get("errors") @property def state(self): """Status of the job. :rtype: str, or ``NoneType`` :returns: the state (None until set from the server). 
""" status = self._properties.get("status") if status is not None: return status.get("state") def _scrub_local_properties(self, cleaned): """Helper: handle subclass properties in cleaned.""" pass def _copy_configuration_properties(self, configuration): """Helper: assign subclass configuration properties in cleaned.""" raise NotImplementedError("Abstract") def _set_properties(self, api_response): """Update properties from resource in body of ``api_response`` :type api_response: dict :param api_response: response returned from an API call """ cleaned = api_response.copy() self._scrub_local_properties(cleaned) statistics = cleaned.get("statistics", {}) if "creationTime" in statistics: statistics["creationTime"] = float(statistics["creationTime"]) if "startTime" in statistics: statistics["startTime"] = float(statistics["startTime"]) if "endTime" in statistics: statistics["endTime"] = float(statistics["endTime"]) self._properties.clear() self._properties.update(cleaned) self._copy_configuration_properties(cleaned.get("configuration", {})) # For Future interface self._set_future_result() @classmethod def _get_resource_config(cls, resource): """Helper for :meth:`from_api_repr` :type resource: dict :param resource: resource for the job :rtype: dict :returns: tuple (string, dict), where the first element is the job ID and the second contains job-specific configuration. :raises: :class:`KeyError` if the resource has no identifier, or is missing the appropriate configuration. """ if "jobReference" not in resource or "jobId" not in resource["jobReference"]: raise KeyError( "Resource lacks required identity information: " '["jobReference"]["jobId"]' ) job_id = resource["jobReference"]["jobId"] if ( "configuration" not in resource or cls._JOB_TYPE not in resource["configuration"] ): raise KeyError( "Resource lacks required configuration: " '["configuration"]["%s"]' % cls._JOB_TYPE ) return job_id, resource["configuration"] def to_api_repr(self): """Generate a resource for the job.""" raise NotImplementedError("Abstract") _build_resource = to_api_repr # backward-compatibility alias def _begin(self, client=None, retry=DEFAULT_RETRY): """API call: begin the job via a POST request See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/insert :type client: :class:`~google.cloud.bigquery.client.Client` or ``NoneType`` :param client: the client to use. If not passed, falls back to the ``client`` stored on the current dataset. :type retry: :class:`google.api_core.retry.Retry` :param retry: (Optional) How to retry the RPC. :raises: :exc:`ValueError` if the job has already begin. """ if self.state is not None: raise ValueError("Job already begun.") client = self._require_client(client) path = "/projects/%s/jobs" % (self.project,) # jobs.insert is idempotent because we ensure that every new # job has an ID. api_response = client._call_api( retry, method="POST", path=path, data=self.to_api_repr() ) self._set_properties(api_response) def exists(self, client=None, retry=DEFAULT_RETRY): """API call: test for the existence of the job via a GET request See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/get :type client: :class:`~google.cloud.bigquery.client.Client` or ``NoneType`` :param client: the client to use. If not passed, falls back to the ``client`` stored on the current dataset. :type retry: :class:`google.api_core.retry.Retry` :param retry: (Optional) How to retry the RPC. :rtype: bool :returns: Boolean indicating existence of the job. 
""" client = self._require_client(client) extra_params = {"fields": "id"} if self.location: extra_params["location"] = self.location try: client._call_api( retry, method="GET", path=self.path, query_params=extra_params ) except NotFound: return False else: return True def reload(self, client=None, retry=DEFAULT_RETRY): """API call: refresh job properties via a GET request. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/get :type client: :class:`~google.cloud.bigquery.client.Client` or ``NoneType`` :param client: the client to use. If not passed, falls back to the ``client`` stored on the current dataset. :type retry: :class:`google.api_core.retry.Retry` :param retry: (Optional) How to retry the RPC. """ client = self._require_client(client) extra_params = {} if self.location: extra_params["location"] = self.location api_response = client._call_api( retry, method="GET", path=self.path, query_params=extra_params ) self._set_properties(api_response) def cancel(self, client=None): """API call: cancel job via a POST request See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/cancel :type client: :class:`~google.cloud.bigquery.client.Client` or ``NoneType`` :param client: the client to use. If not passed, falls back to the ``client`` stored on the current dataset. :rtype: bool :returns: Boolean indicating that the cancel request was sent. """ client = self._require_client(client) extra_params = {} if self.location: extra_params["location"] = self.location api_response = client._connection.api_request( method="POST", path="%s/cancel" % (self.path,), query_params=extra_params ) self._set_properties(api_response["job"]) # The Future interface requires that we return True if the *attempt* # to cancel was successful. return True # The following methods implement the PollingFuture interface. Note that # the methods above are from the pre-Future interface and are left for # compatibility. The only "overloaded" method is :meth:`cancel`, which # satisfies both interfaces. def _set_future_result(self): """Set the result or exception from the job if it is complete.""" # This must be done in a lock to prevent the polling thread # and main thread from both executing the completion logic # at the same time. with self._completion_lock: # If the operation isn't complete or if the result has already been # set, do not call set_result/set_exception again. # Note: self._result_set is set to True in set_result and # set_exception, in case those methods are invoked directly. if self.state != _DONE_STATE or self._result_set: return if self.error_result is not None: exception = _error_result_to_exception(self.error_result) self.set_exception(exception) else: self.set_result(self) def done(self, retry=DEFAULT_RETRY): """Refresh the job and checks if it is complete. :type retry: :class:`google.api_core.retry.Retry` :param retry: (Optional) How to retry the RPC. :rtype: bool :returns: True if the job is complete, False otherwise. """ # Do not refresh is the state is already done, as the job will not # change once complete. if self.state != _DONE_STATE: self.reload(retry=retry) return self.state == _DONE_STATE def result(self, timeout=None, retry=DEFAULT_RETRY): """Start the job and wait for it to complete and get the result. :type timeout: float :param timeout: How long (in seconds) to wait for job to complete before raising a :class:`concurrent.futures.TimeoutError`. :type retry: :class:`google.api_core.retry.Retry` :param retry: (Optional) How to retry the RPC. 
        :rtype: _AsyncJob
        :returns: This instance.
        :raises: :class:`~google.cloud.exceptions.GoogleCloudError` if the job
                 failed or :class:`concurrent.futures.TimeoutError` if the job
                 did not complete in the given timeout.
        """
        if self.state is None:
            self._begin(retry=retry)
        # TODO: modify PollingFuture so it can pass a retry argument to done().
        return super(_AsyncJob, self).result(timeout=timeout)

    def cancelled(self):
        """Check if the job has been cancelled.

        The API does not expose a cancelled state directly, so this checks
        whether the job's error result indicates that it was stopped. This
        method is here to satisfy the interface for
        :class:`google.api_core.future.Future`.

        :rtype: bool
        :returns: True if the job was stopped, False otherwise.
        """
        return (
            self.error_result is not None
            and self.error_result.get("reason") == _STOPPED_REASON
        )


class _JobConfig(object):
    """Abstract base class for job configuration objects.

    Arguments:
        job_type (str): The key to use for the job configuration.
    """

    def __init__(self, job_type, **kwargs):
        self._job_type = job_type
        self._properties = {job_type: {}}
        for prop, val in kwargs.items():
            setattr(self, prop, val)

    @property
    def labels(self):
        """Dict[str, str]: Labels for the job.

        This method always returns a dict. To change a job's labels,
        modify the dict, then call ``Client.update_job``. To delete a
        label, set its value to :data:`None` before updating.

        Raises:
            ValueError: If ``value`` type is invalid.
        """
        return self._properties.setdefault("labels", {})

    @labels.setter
    def labels(self, value):
        if not isinstance(value, dict):
            raise ValueError("Pass a dict")
        self._properties["labels"] = value

    def _get_sub_prop(self, key, default=None):
        """Get a value in the ``self._properties[self._job_type]`` dictionary.

        Most job properties are inside the dictionary related to the job type
        (e.g. 'copy', 'extract', 'load', 'query'). Use this method to access
        those properties::

            self._get_sub_prop('destinationTable')

        This is equivalent to using the ``_helpers._get_sub_prop`` function::

            _helpers._get_sub_prop(
                self._properties, ['query', 'destinationTable'])

        Arguments:
            key (str):
                Key for the value to get in the
                ``self._properties[self._job_type]`` dictionary.
            default (object):
                (Optional) Default value to return if the key is not found.
                Defaults to :data:`None`.

        Returns:
            object: The value if present or the default.
        """
        return _helpers._get_sub_prop(
            self._properties, [self._job_type, key], default=default
        )

    def _set_sub_prop(self, key, value):
        """Set a value in the ``self._properties[self._job_type]`` dictionary.

        Most job properties are inside the dictionary related to the job type
        (e.g. 'copy', 'extract', 'load', 'query'). Use this method to set
        those properties::

            self._set_sub_prop('useLegacySql', False)

        This is equivalent to using the ``_helpers._set_sub_prop`` function::

            _helpers._set_sub_prop(
                self._properties, ['query', 'useLegacySql'], False)

        Arguments:
            key (str):
                Key to set in the ``self._properties[self._job_type]``
                dictionary.
            value (object): Value to set.
        """
        _helpers._set_sub_prop(self._properties, [self._job_type, key], value)

    def _del_sub_prop(self, key):
        """Remove ``key`` from the ``self._properties[self._job_type]`` dict.

        Most job properties are inside the dictionary related to the job type
        (e.g. 'copy', 'extract', 'load', 'query'). Use this method to clear
        those properties::

            self._del_sub_prop('useLegacySql')

        This is equivalent to using the ``_helpers._del_sub_prop`` function::

            _helpers._del_sub_prop(
                self._properties, ['query', 'useLegacySql'])

        Arguments:
            key (str):
                Key to remove in the ``self._properties[self._job_type]``
                dictionary.
""" _helpers._del_sub_prop(self._properties, [self._job_type, key]) def to_api_repr(self): """Build an API representation of the job config. :rtype: dict :returns: A dictionary in the format used by the BigQuery API. """ return copy.deepcopy(self._properties) def _fill_from_default(self, default_job_config): """Merge this job config with a default job config. The keys in this object take precedence over the keys in the default config. The merge is done at the top-level as well as for keys one level below the job type. Arguments: default_job_config (google.cloud.bigquery.job._JobConfig): The default job config that will be used to fill in self. Returns: google.cloud.bigquery.job._JobConfig A new (merged) job config. """ if self._job_type != default_job_config._job_type: raise TypeError( "attempted to merge two incompatible job types: " + repr(self._job_type) + ", " + repr(default_job_config._job_type) ) new_job_config = self.__class__() default_job_properties = copy.deepcopy(default_job_config._properties) for key in self._properties: if key != self._job_type: default_job_properties[key] = self._properties[key] default_job_properties[self._job_type].update(self._properties[self._job_type]) new_job_config._properties = default_job_properties return new_job_config @classmethod def from_api_repr(cls, resource): """Factory: construct a job configuration given its API representation :type resource: dict :param resource: An extract job configuration in the same representation as is returned from the API. :rtype: :class:`google.cloud.bigquery.job._JobConfig` :returns: Configuration parsed from ``resource``. """ config = cls() config._properties = copy.deepcopy(resource) return config class LoadJobConfig(_JobConfig): """Configuration options for load jobs. All properties in this class are optional. Values which are :data:`None` -> server defaults. Set properties on the constructed configuration by using the property name as the name of a keyword argument. """ def __init__(self, **kwargs): super(LoadJobConfig, self).__init__("load", **kwargs) @property def allow_jagged_rows(self): """bool: Allow missing trailing optional columns (CSV only). See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.allowJaggedRows """ return self._get_sub_prop("allowJaggedRows") @allow_jagged_rows.setter def allow_jagged_rows(self, value): self._set_sub_prop("allowJaggedRows", value) @property def allow_quoted_newlines(self): """bool: Allow quoted data containing newline characters (CSV only). See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.allowQuotedNewlines """ return self._get_sub_prop("allowQuotedNewlines") @allow_quoted_newlines.setter def allow_quoted_newlines(self, value): self._set_sub_prop("allowQuotedNewlines", value) @property def autodetect(self): """bool: Automatically infer the schema from a sample of the data. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.autodetect """ return self._get_sub_prop("autodetect") @autodetect.setter def autodetect(self, value): self._set_sub_prop("autodetect", value) @property def clustering_fields(self): """Union[List[str], None]: Fields defining clustering for the table (Defaults to :data:`None`). Clustering fields are immutable after table creation. .. note:: As of 2018-06-29, clustering fields cannot be set on a table which does not also have time partioning defined. 
""" prop = self._get_sub_prop("clustering") if prop is not None: return list(prop.get("fields", ())) @clustering_fields.setter def clustering_fields(self, value): """Union[List[str], None]: Fields defining clustering for the table (Defaults to :data:`None`). """ if value is not None: self._set_sub_prop("clustering", {"fields": value}) else: self._del_sub_prop("clustering") @property def create_disposition(self): """google.cloud.bigquery.job.CreateDisposition: Specifies behavior for creating tables. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.createDisposition """ return self._get_sub_prop("createDisposition") @create_disposition.setter def create_disposition(self, value): self._set_sub_prop("createDisposition", value) @property def destination_encryption_configuration(self): """google.cloud.bigquery.table.EncryptionConfiguration: Custom encryption configuration for the destination table. Custom encryption configuration (e.g., Cloud KMS keys) or :data:`None` if using default encryption. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.destinationEncryptionConfiguration """ prop = self._get_sub_prop("destinationEncryptionConfiguration") if prop is not None: prop = EncryptionConfiguration.from_api_repr(prop) return prop @destination_encryption_configuration.setter def destination_encryption_configuration(self, value): api_repr = value if value is not None: api_repr = value.to_api_repr() self._set_sub_prop("destinationEncryptionConfiguration", api_repr) else: self._del_sub_prop("destinationEncryptionConfiguration") @property def destination_table_description(self): """Union[str, None] name given to destination table. See: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.destinationTableProperties.description """ prop = self._get_sub_prop("destinationTableProperties") if prop is not None: return prop["description"] @destination_table_description.setter def destination_table_description(self, value): keys = [self._job_type, "destinationTableProperties", "description"] if value is not None: _helpers._set_sub_prop(self._properties, keys, value) else: _helpers._del_sub_prop(self._properties, keys) @property def destination_table_friendly_name(self): """Union[str, None] name given to destination table. See: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.destinationTableProperties.friendlyName """ prop = self._get_sub_prop("destinationTableProperties") if prop is not None: return prop["friendlyName"] @destination_table_friendly_name.setter def destination_table_friendly_name(self, value): keys = [self._job_type, "destinationTableProperties", "friendlyName"] if value is not None: _helpers._set_sub_prop(self._properties, keys, value) else: _helpers._del_sub_prop(self._properties, keys) @property def encoding(self): """google.cloud.bigquery.job.Encoding: The character encoding of the data. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.encoding """ return self._get_sub_prop("encoding") @encoding.setter def encoding(self, value): self._set_sub_prop("encoding", value) @property def field_delimiter(self): """str: The separator for fields in a CSV file. 
See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.fieldDelimiter """ return self._get_sub_prop("fieldDelimiter") @field_delimiter.setter def field_delimiter(self, value): self._set_sub_prop("fieldDelimiter", value) @property def ignore_unknown_values(self): """bool: Ignore extra values not represented in the table schema. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.ignoreUnknownValues """ return self._get_sub_prop("ignoreUnknownValues") @ignore_unknown_values.setter def ignore_unknown_values(self, value): self._set_sub_prop("ignoreUnknownValues", value) @property def max_bad_records(self): """int: Number of invalid rows to ignore. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.maxBadRecords """ return _helpers._int_or_none(self._get_sub_prop("maxBadRecords")) @max_bad_records.setter def max_bad_records(self, value): self._set_sub_prop("maxBadRecords", value) @property def null_marker(self): """str: Represents a null value (CSV only). See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.nullMarker """ return self._get_sub_prop("nullMarker") @null_marker.setter def null_marker(self, value): self._set_sub_prop("nullMarker", value) @property def quote_character(self): """str: Character used to quote data sections (CSV only). See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.quote """ return self._get_sub_prop("quote") @quote_character.setter def quote_character(self, value): self._set_sub_prop("quote", value) @property def schema(self): """List[google.cloud.bigquery.schema.SchemaField]: Schema of the destination table. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema """ schema = _helpers._get_sub_prop(self._properties, ["load", "schema", "fields"]) if schema is None: return return [SchemaField.from_api_repr(field) for field in schema] @schema.setter def schema(self, value): if not all(hasattr(field, "to_api_repr") for field in value): raise ValueError("Schema items must be fields") _helpers._set_sub_prop( self._properties, ["load", "schema", "fields"], [field.to_api_repr() for field in value], ) @property def schema_update_options(self): """List[google.cloud.bigquery.job.SchemaUpdateOption]: Specifies updates to the destination table schema to allow as a side effect of the load job. """ return self._get_sub_prop("schemaUpdateOptions") @schema_update_options.setter def schema_update_options(self, values): self._set_sub_prop("schemaUpdateOptions", values) @property def skip_leading_rows(self): """int: Number of rows to skip when reading data (CSV only). See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.skipLeadingRows """ return _helpers._int_or_none(self._get_sub_prop("skipLeadingRows")) @skip_leading_rows.setter def skip_leading_rows(self, value): self._set_sub_prop("skipLeadingRows", str(value)) @property def source_format(self): """google.cloud.bigquery.job.SourceFormat: File format of the data. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.sourceFormat """ return self._get_sub_prop("sourceFormat") @source_format.setter def source_format(self, value): self._set_sub_prop("sourceFormat", value) @property def time_partitioning(self): """google.cloud.bigquery.table.TimePartitioning: Specifies time-based partitioning for the destination table. 
""" prop = self._get_sub_prop("timePartitioning") if prop is not None: prop = TimePartitioning.from_api_repr(prop) return prop @time_partitioning.setter def time_partitioning(self, value): api_repr = value if value is not None: api_repr = value.to_api_repr() self._set_sub_prop("timePartitioning", api_repr) else: self._del_sub_prop("timePartitioning") @property def use_avro_logical_types(self): """bool: For loads of Avro data, governs whether Avro logical types are converted to their corresponding BigQuery types(e.g. TIMESTAMP) rather than raw types (e.g. INTEGER). """ return self._get_sub_prop("useAvroLogicalTypes") @use_avro_logical_types.setter def use_avro_logical_types(self, value): self._set_sub_prop("useAvroLogicalTypes", bool(value)) @property def write_disposition(self): """google.cloud.bigquery.job.WriteDisposition: Action that occurs if the destination table already exists. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.writeDisposition """ return self._get_sub_prop("writeDisposition") @write_disposition.setter def write_disposition(self, value): self._set_sub_prop("writeDisposition", value) class LoadJob(_AsyncJob): """Asynchronous job for loading data into a table. Can load from Google Cloud Storage URIs or from a file. :type job_id: str :param job_id: the job's ID :type source_uris: sequence of string or ``NoneType`` :param source_uris: URIs of one or more data files to be loaded. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.sourceUris for supported URI formats. Pass None for jobs that load from a file. :type destination: :class:`google.cloud.bigquery.table.TableReference` :param destination: reference to table into which data is to be loaded. :type client: :class:`google.cloud.bigquery.client.Client` :param client: A client which holds credentials and project configuration for the dataset (which requires a project). """ _JOB_TYPE = "load" def __init__(self, job_id, source_uris, destination, client, job_config=None): super(LoadJob, self).__init__(job_id, client) if job_config is None: job_config = LoadJobConfig() self.source_uris = source_uris self.destination = destination self._configuration = job_config @property def allow_jagged_rows(self): """See :attr:`google.cloud.bigquery.job.LoadJobConfig.allow_jagged_rows`. """ return self._configuration.allow_jagged_rows @property def allow_quoted_newlines(self): """See :attr:`google.cloud.bigquery.job.LoadJobConfig.allow_quoted_newlines`. """ return self._configuration.allow_quoted_newlines @property def autodetect(self): """See :attr:`google.cloud.bigquery.job.LoadJobConfig.autodetect`. """ return self._configuration.autodetect @property def create_disposition(self): """See :attr:`google.cloud.bigquery.job.LoadJobConfig.create_disposition`. """ return self._configuration.create_disposition @property def encoding(self): """See :attr:`google.cloud.bigquery.job.LoadJobConfig.encoding`. """ return self._configuration.encoding @property def field_delimiter(self): """See :attr:`google.cloud.bigquery.job.LoadJobConfig.field_delimiter`. """ return self._configuration.field_delimiter @property def ignore_unknown_values(self): """See :attr:`google.cloud.bigquery.job.LoadJobConfig.ignore_unknown_values`. """ return self._configuration.ignore_unknown_values @property def max_bad_records(self): """See :attr:`google.cloud.bigquery.job.LoadJobConfig.max_bad_records`. 
""" return self._configuration.max_bad_records @property def null_marker(self): """See :attr:`google.cloud.bigquery.job.LoadJobConfig.null_marker`. """ return self._configuration.null_marker @property def quote_character(self): """See :attr:`google.cloud.bigquery.job.LoadJobConfig.quote_character`. """ return self._configuration.quote_character @property def skip_leading_rows(self): """See :attr:`google.cloud.bigquery.job.LoadJobConfig.skip_leading_rows`. """ return self._configuration.skip_leading_rows @property def source_format(self): """See :attr:`google.cloud.bigquery.job.LoadJobConfig.source_format`. """ return self._configuration.source_format @property def write_disposition(self): """See :attr:`google.cloud.bigquery.job.LoadJobConfig.write_disposition`. """ return self._configuration.write_disposition @property def schema(self): """See :attr:`google.cloud.bigquery.job.LoadJobConfig.schema`. """ return self._configuration.schema @property def destination_encryption_configuration(self): """google.cloud.bigquery.table.EncryptionConfiguration: Custom encryption configuration for the destination table. Custom encryption configuration (e.g., Cloud KMS keys) or :data:`None` if using default encryption. See :attr:`google.cloud.bigquery.job.LoadJobConfig.destination_encryption_configuration`. """ return self._configuration.destination_encryption_configuration @property def time_partitioning(self): """See :attr:`google.cloud.bigquery.job.LoadJobConfig.time_partitioning`. """ return self._configuration.time_partitioning @property def use_avro_logical_types(self): """See :attr:`google.cloud.bigquery.job.LoadJobConfig.use_avro_logical_types`. """ return self._configuration.use_avro_logical_types @property def clustering_fields(self): """See :attr:`google.cloud.bigquery.job.LoadJobConfig.clustering_fields`. """ return self._configuration.clustering_fields @property def schema_update_options(self): """See :attr:`google.cloud.bigquery.job.LoadJobConfig.schema_update_options`. """ return self._configuration.schema_update_options @property def input_file_bytes(self): """Count of bytes loaded from source files. :rtype: int, or ``NoneType`` :returns: the count (None until set from the server). :raises: ValueError for invalid value types. """ return _helpers._int_or_none( _helpers._get_sub_prop( self._properties, ["statistics", "load", "inputFileBytes"] ) ) @property def input_files(self): """Count of source files. :rtype: int, or ``NoneType`` :returns: the count (None until set from the server). """ return _helpers._int_or_none( _helpers._get_sub_prop( self._properties, ["statistics", "load", "inputFiles"] ) ) @property def output_bytes(self): """Count of bytes saved to destination table. :rtype: int, or ``NoneType`` :returns: the count (None until set from the server). """ return _helpers._int_or_none( _helpers._get_sub_prop( self._properties, ["statistics", "load", "outputBytes"] ) ) @property def output_rows(self): """Count of rows saved to destination table. :rtype: int, or ``NoneType`` :returns: the count (None until set from the server). 
""" return _helpers._int_or_none( _helpers._get_sub_prop( self._properties, ["statistics", "load", "outputRows"] ) ) def to_api_repr(self): """Generate a resource for :meth:`_begin`.""" configuration = self._configuration.to_api_repr() if self.source_uris is not None: _helpers._set_sub_prop( configuration, ["load", "sourceUris"], self.source_uris ) _helpers._set_sub_prop( configuration, ["load", "destinationTable"], self.destination.to_api_repr() ) return { "jobReference": self._properties["jobReference"], "configuration": configuration, } def _copy_configuration_properties(self, configuration): """Helper: assign subclass configuration properties in cleaned.""" self._configuration._properties = copy.deepcopy(configuration) @classmethod def from_api_repr(cls, resource, client): """Factory: construct a job given its API representation .. note: This method assumes that the project found in the resource matches the client's project. :type resource: dict :param resource: dataset job representation returned from the API :type client: :class:`google.cloud.bigquery.client.Client` :param client: Client which holds credentials and project configuration for the dataset. :rtype: :class:`google.cloud.bigquery.job.LoadJob` :returns: Job parsed from ``resource``. """ config_resource = resource.get("configuration", {}) config = LoadJobConfig.from_api_repr(config_resource) # A load job requires a destination table. dest_config = config_resource["load"]["destinationTable"] ds_ref = DatasetReference(dest_config["projectId"], dest_config["datasetId"]) destination = TableReference(ds_ref, dest_config["tableId"]) # sourceUris will be absent if this is a file upload. source_uris = _helpers._get_sub_prop(config_resource, ["load", "sourceUris"]) job_ref = _JobReference._from_api_repr(resource["jobReference"]) job = cls(job_ref, source_uris, destination, client, config) job._set_properties(resource) return job class CopyJobConfig(_JobConfig): """Configuration options for copy jobs. All properties in this class are optional. Values which are :data:`None` -> server defaults. Set properties on the constructed configuration by using the property name as the name of a keyword argument. """ def __init__(self, **kwargs): super(CopyJobConfig, self).__init__("copy", **kwargs) @property def create_disposition(self): """google.cloud.bigquery.job.CreateDisposition: Specifies behavior for creating tables. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.copy.createDisposition """ return self._get_sub_prop("createDisposition") @create_disposition.setter def create_disposition(self, value): self._set_sub_prop("createDisposition", value) @property def write_disposition(self): """google.cloud.bigquery.job.WriteDisposition: Action that occurs if the destination table already exists. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.copy.writeDisposition """ return self._get_sub_prop("writeDisposition") @write_disposition.setter def write_disposition(self, value): self._set_sub_prop("writeDisposition", value) @property def destination_encryption_configuration(self): """google.cloud.bigquery.table.EncryptionConfiguration: Custom encryption configuration for the destination table. Custom encryption configuration (e.g., Cloud KMS keys) or :data:`None` if using default encryption. 
        See
        https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.copy.destinationEncryptionConfiguration
        """
        prop = self._get_sub_prop("destinationEncryptionConfiguration")
        if prop is not None:
            prop = EncryptionConfiguration.from_api_repr(prop)
        return prop

    @destination_encryption_configuration.setter
    def destination_encryption_configuration(self, value):
        api_repr = value
        if value is not None:
            api_repr = value.to_api_repr()
        self._set_sub_prop("destinationEncryptionConfiguration", api_repr)


class CopyJob(_AsyncJob):
    """Asynchronous job: copy data into a table from other tables.

    :type job_id: str
    :param job_id: the job's ID, within the project belonging to ``client``.

    :type sources: list of :class:`google.cloud.bigquery.table.TableReference`
    :param sources: Tables from which data is to be copied.

    :type destination: :class:`google.cloud.bigquery.table.TableReference`
    :param destination: Table into which data is to be copied.

    :type client: :class:`google.cloud.bigquery.client.Client`
    :param client: A client which holds credentials and project configuration
                   for the dataset (which requires a project).

    :type job_config: :class:`~google.cloud.bigquery.job.CopyJobConfig`
    :param job_config:
        (Optional) Extra configuration options for the copy job.
    """

    _JOB_TYPE = "copy"

    def __init__(self, job_id, sources, destination, client, job_config=None):
        super(CopyJob, self).__init__(job_id, client)

        if job_config is None:
            job_config = CopyJobConfig()

        self.destination = destination
        self.sources = sources
        self._configuration = job_config

    @property
    def create_disposition(self):
        """See
        :attr:`google.cloud.bigquery.job.CopyJobConfig.create_disposition`.
        """
        return self._configuration.create_disposition

    @property
    def write_disposition(self):
        """See
        :attr:`google.cloud.bigquery.job.CopyJobConfig.write_disposition`.
        """
        return self._configuration.write_disposition

    @property
    def destination_encryption_configuration(self):
        """google.cloud.bigquery.table.EncryptionConfiguration: Custom
        encryption configuration for the destination table.

        Custom encryption configuration (e.g., Cloud KMS keys) or :data:`None`
        if using default encryption.

        See
        :attr:`google.cloud.bigquery.job.CopyJobConfig.destination_encryption_configuration`.
        """
        return self._configuration.destination_encryption_configuration

    def to_api_repr(self):
        """Generate a resource for :meth:`_begin`."""

        source_refs = [
            {
                "projectId": table.project,
                "datasetId": table.dataset_id,
                "tableId": table.table_id,
            }
            for table in self.sources
        ]

        configuration = self._configuration.to_api_repr()
        _helpers._set_sub_prop(configuration, ["copy", "sourceTables"], source_refs)
        _helpers._set_sub_prop(
            configuration,
            ["copy", "destinationTable"],
            {
                "projectId": self.destination.project,
                "datasetId": self.destination.dataset_id,
                "tableId": self.destination.table_id,
            },
        )

        return {
            "jobReference": self._properties["jobReference"],
            "configuration": configuration,
        }

    def _copy_configuration_properties(self, configuration):
        """Helper: assign subclass configuration properties in cleaned."""
        self._configuration._properties = copy.deepcopy(configuration)

    @classmethod
    def from_api_repr(cls, resource, client):
        """Factory: construct a job given its API representation

        .. note:

           This method assumes that the project found in the resource matches
           the client's project.
        :type resource: dict
        :param resource: dataset job representation returned from the API

        :type client: :class:`google.cloud.bigquery.client.Client`
        :param client: Client which holds credentials and project
                       configuration for the dataset.

        :rtype: :class:`google.cloud.bigquery.job.CopyJob`
        :returns: Job parsed from ``resource``.
        """
        job_id, config_resource = cls._get_resource_config(resource)
        config = CopyJobConfig.from_api_repr(config_resource)
        # Copy required fields to the job.
        copy_resource = config_resource["copy"]
        destination = TableReference.from_api_repr(copy_resource["destinationTable"])
        sources = []
        source_configs = copy_resource.get("sourceTables")
        if source_configs is None:
            single = copy_resource.get("sourceTable")
            if single is None:
                raise KeyError("Resource missing 'sourceTables' / 'sourceTable'")
            source_configs = [single]
        for source_config in source_configs:
            table_ref = TableReference.from_api_repr(source_config)
            sources.append(table_ref)
        job = cls(job_id, sources, destination, client=client, job_config=config)
        job._set_properties(resource)
        return job


class ExtractJobConfig(_JobConfig):
    """Configuration options for extract jobs.

    All properties in this class are optional. Values which are :data:`None` ->
    server defaults. Set properties on the constructed configuration by using
    the property name as the name of a keyword argument.
    """

    def __init__(self, **kwargs):
        super(ExtractJobConfig, self).__init__("extract", **kwargs)

    @property
    def compression(self):
        """google.cloud.bigquery.job.Compression: Compression type to use for
        exported files.

        See
        https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.extract.compression
        """
        return self._get_sub_prop("compression")

    @compression.setter
    def compression(self, value):
        self._set_sub_prop("compression", value)

    @property
    def destination_format(self):
        """google.cloud.bigquery.job.DestinationFormat: Exported file format.

        See
        https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.extract.destinationFormat
        """
        return self._get_sub_prop("destinationFormat")

    @destination_format.setter
    def destination_format(self, value):
        self._set_sub_prop("destinationFormat", value)

    @property
    def field_delimiter(self):
        """str: Delimiter to use between fields in the exported data.

        See
        https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.extract.fieldDelimiter
        """
        return self._get_sub_prop("fieldDelimiter")

    @field_delimiter.setter
    def field_delimiter(self, value):
        self._set_sub_prop("fieldDelimiter", value)

    @property
    def print_header(self):
        """bool: Print a header row in the exported data.

        See
        https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.extract.printHeader
        """
        return self._get_sub_prop("printHeader")

    @print_header.setter
    def print_header(self, value):
        self._set_sub_prop("printHeader", value)


class ExtractJob(_AsyncJob):
    """Asynchronous job: extract data from a table into Cloud Storage.

    :type job_id: str
    :param job_id: the job's ID

    :type source: :class:`google.cloud.bigquery.table.TableReference`
    :param source: Table from which data is to be extracted.

    :type destination_uris: list of string
    :param destination_uris:
        URIs describing where the extracted data will be written in Cloud
        Storage, using the format ``gs://<bucket_name>/<object_name_or_glob>``.

    :type client: :class:`google.cloud.bigquery.client.Client`
    :param client: A client which holds credentials and project configuration.
:type job_config: :class:`~google.cloud.bigquery.job.ExtractJobConfig` :param job_config: (Optional) Extra configuration options for the extract job. """ _JOB_TYPE = "extract" def __init__(self, job_id, source, destination_uris, client, job_config=None): super(ExtractJob, self).__init__(job_id, client) if job_config is None: job_config = ExtractJobConfig() self.source = source self.destination_uris = destination_uris self._configuration = job_config @property def compression(self): """See :attr:`google.cloud.bigquery.job.ExtractJobConfig.compression`. """ return self._configuration.compression @property def destination_format(self): """See :attr:`google.cloud.bigquery.job.ExtractJobConfig.destination_format`. """ return self._configuration.destination_format @property def field_delimiter(self): """See :attr:`google.cloud.bigquery.job.ExtractJobConfig.field_delimiter`. """ return self._configuration.field_delimiter @property def print_header(self): """See :attr:`google.cloud.bigquery.job.ExtractJobConfig.print_header`. """ return self._configuration.print_header @property def destination_uri_file_counts(self): """Return file counts from job statistics, if present. See: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.extract.destinationUriFileCounts Returns: a list of integer counts, each representing the number of files per destination URI or URI pattern specified in the extract configuration. These values will be in the same order as the URIs specified in the 'destinationUris' field. Returns None if job is not yet complete. """ counts = self._job_statistics().get("destinationUriFileCounts") if counts is not None: return [int(count) for count in counts] return None def to_api_repr(self): """Generate a resource for :meth:`_begin`.""" source_ref = { "projectId": self.source.project, "datasetId": self.source.dataset_id, "tableId": self.source.table_id, } configuration = self._configuration.to_api_repr() _helpers._set_sub_prop(configuration, ["extract", "sourceTable"], source_ref) _helpers._set_sub_prop( configuration, ["extract", "destinationUris"], self.destination_uris ) return { "jobReference": self._properties["jobReference"], "configuration": configuration, } def _copy_configuration_properties(self, configuration): """Helper: assign subclass configuration properties in cleaned.""" self._configuration._properties = copy.deepcopy(configuration) @classmethod def from_api_repr(cls, resource, client): """Factory: construct a job given its API representation .. note: This method assumes that the project found in the resource matches the client's project. :type resource: dict :param resource: dataset job representation returned from the API :type client: :class:`google.cloud.bigquery.client.Client` :param client: Client which holds credentials and project configuration for the dataset. :rtype: :class:`google.cloud.bigquery.job.ExtractJob` :returns: Job parsed from ``resource``. """ job_id, config_resource = cls._get_resource_config(resource) config = ExtractJobConfig.from_api_repr(config_resource) source_config = _helpers._get_sub_prop( config_resource, ["extract", "sourceTable"] ) dataset = DatasetReference( source_config["projectId"], source_config["datasetId"] ) source = dataset.table(source_config["tableId"]) destination_uris = _helpers._get_sub_prop( config_resource, ["extract", "destinationUris"] ) job = cls(job_id, source, destination_uris, client=client, job_config=config) job._set_properties(resource) return job def _from_api_repr_query_parameters(resource):
return [_query_param_from_api_repr(mapping) for mapping in resource] def _to_api_repr_query_parameters(value): return [query_parameter.to_api_repr() for query_parameter in value] def _from_api_repr_udf_resources(resource): udf_resources = [] for udf_mapping in resource: for udf_type, udf_value in udf_mapping.items(): udf_resources.append(UDFResource(udf_type, udf_value)) return udf_resources def _to_api_repr_udf_resources(value): return [{udf_resource.udf_type: udf_resource.value} for udf_resource in value] def _from_api_repr_table_defs(resource): return {k: ExternalConfig.from_api_repr(v) for k, v in resource.items()} def _to_api_repr_table_defs(value): return {k: ExternalConfig.to_api_repr(v) for k, v in value.items()} class QueryJobConfig(_JobConfig): """Configuration options for query jobs. All properties in this class are optional. Values which are :data:`None` -> server defaults. Set properties on the constructed configuration by using the property name as the name of a keyword argument. """ def __init__(self, **kwargs): super(QueryJobConfig, self).__init__("query", **kwargs) @property def destination_encryption_configuration(self): """google.cloud.bigquery.table.EncryptionConfiguration: Custom encryption configuration for the destination table. Custom encryption configuration (e.g., Cloud KMS keys) or :data:`None` if using default encryption. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.destinationEncryptionConfiguration """ prop = self._get_sub_prop("destinationEncryptionConfiguration") if prop is not None: prop = EncryptionConfiguration.from_api_repr(prop) return prop @destination_encryption_configuration.setter def destination_encryption_configuration(self, value): api_repr = value if value is not None: api_repr = value.to_api_repr() self._set_sub_prop("destinationEncryptionConfiguration", api_repr) @property def allow_large_results(self): """bool: Allow large query results tables (legacy SQL, only) See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.allowLargeResults """ return self._get_sub_prop("allowLargeResults") @allow_large_results.setter def allow_large_results(self, value): self._set_sub_prop("allowLargeResults", value) @property def create_disposition(self): """google.cloud.bigquery.job.CreateDisposition: Specifies behavior for creating tables. See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.createDisposition """ return self._get_sub_prop("createDisposition") @create_disposition.setter def create_disposition(self, value): self._set_sub_prop("createDisposition", value) @property def default_dataset(self): """google.cloud.bigquery.dataset.DatasetReference: the default dataset to use for unqualified table names in the query or :data:`None` if not set. See https://g.co/cloud/bigquery/docs/reference/v2/jobs#configuration.query.defaultDataset """ prop = self._get_sub_prop("defaultDataset") if prop is not None: prop = DatasetReference.from_api_repr(prop) return prop @default_dataset.setter def default_dataset(self, value): resource = None if value is not None: resource = value.to_api_repr() self._set_sub_prop("defaultDataset", resource) @property def destination(self): """google.cloud.bigquery.table.TableReference: table where results are written or :data:`None` if not set. 
See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.destinationTable """ prop = self._get_sub_prop("destinationTable") if prop is not None: prop = TableReference.from_api_repr(prop) return prop @destination.setter def destination(self, value): resource = None if value is not None: resource = value.to_api_repr() self._set_sub_prop("destinationTable", resource) @property def dry_run(self): """bool: :data:`True` if this query should be a dry run to estimate costs. See https://g.co/cloud/bigquery/docs/reference/v2/jobs#configuration.dryRun """ return self._properties.get("dryRun") @dry_run.setter def dry_run(self, value): self._properties["dryRun"] = value @property def flatten_results(self): """bool: Flatten nested/repeated fields in results. (Legacy SQL only) See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.flattenResults """ return self._get_sub_prop("flattenResults") @flatten_results.setter def flatten_results(self, value): self._set_sub_prop("flattenResults", value) @property def maximum_billing_tier(self): """int: Deprecated. Changes the billing tier to allow high-compute queries. See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.maximumBillingTier """ return self._get_sub_prop("maximumBillingTier") @maximum_billing_tier.setter def maximum_billing_tier(self, value): self._set_sub_prop("maximumBillingTier", value) @property def maximum_bytes_billed(self): """int: Maximum bytes to be billed for this job or :data:`None` if not set. See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.maximumBytesBilled """ return _helpers._int_or_none(self._get_sub_prop("maximumBytesBilled")) @maximum_bytes_billed.setter def maximum_bytes_billed(self, value): self._set_sub_prop("maximumBytesBilled", str(value)) @property def priority(self): """google.cloud.bigquery.job.QueryPriority: Priority of the query. See https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.priority """ return self._get_sub_prop("priority") @priority.setter def priority(self, value): self._set_sub_prop("priority", value) @property def query_parameters(self): """List[Union[google.cloud.bigquery.query.ArrayQueryParameter, \ google.cloud.bigquery.query.ScalarQueryParameter, \ google.cloud.bigquery.query.StructQueryParameter]]: list of parameters for parameterized query (empty by default) See: https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.queryParameters """ prop = self._get_sub_prop("queryParameters", default=[]) return _from_api_repr_query_parameters(prop) @query_parameters.setter def query_parameters(self, values): self._set_sub_prop("queryParameters", _to_api_repr_query_parameters(values)) @property def udf_resources(self): """List[google.cloud.bigquery.query.UDFResource]: user defined function resources (empty by default) See: https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.userDefinedFunctionResources """ prop = self._get_sub_prop("userDefinedFunctionResources", default=[]) return _from_api_repr_udf_resources(prop) @udf_resources.setter def udf_resources(self, values): self._set_sub_prop( "userDefinedFunctionResources", _to_api_repr_udf_resources(values) ) @property def use_legacy_sql(self): """bool: Use legacy SQL syntax. 
        See
        https://g.co/cloud/bigquery/docs/reference/v2/jobs#configuration.query.useLegacySql
        """
        return self._get_sub_prop("useLegacySql")

    @use_legacy_sql.setter
    def use_legacy_sql(self, value):
        self._set_sub_prop("useLegacySql", value)

    @property
    def use_query_cache(self):
        """bool: Look for the query result in the cache.

        See
        https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.useQueryCache
        """
        return self._get_sub_prop("useQueryCache")

    @use_query_cache.setter
    def use_query_cache(self, value):
        self._set_sub_prop("useQueryCache", value)

    @property
    def write_disposition(self):
        """google.cloud.bigquery.job.WriteDisposition: Action that occurs if
        the destination table already exists.

        See
        https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.writeDisposition
        """
        return self._get_sub_prop("writeDisposition")

    @write_disposition.setter
    def write_disposition(self, value):
        self._set_sub_prop("writeDisposition", value)

    @property
    def table_definitions(self):
        """Dict[str, google.cloud.bigquery.external_config.ExternalConfig]:
        Definitions for external tables or :data:`None` if not set.

        See
        https://g.co/cloud/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions
        """
        prop = self._get_sub_prop("tableDefinitions")
        if prop is not None:
            prop = _from_api_repr_table_defs(prop)
        return prop

    @table_definitions.setter
    def table_definitions(self, values):
        self._set_sub_prop("tableDefinitions", _to_api_repr_table_defs(values))

    @property
    def time_partitioning(self):
        """google.cloud.bigquery.table.TimePartitioning: Specifies time-based
        partitioning for the destination table.
        """
        prop = self._get_sub_prop("timePartitioning")
        if prop is not None:
            prop = TimePartitioning.from_api_repr(prop)
        return prop

    @time_partitioning.setter
    def time_partitioning(self, value):
        api_repr = value
        if value is not None:
            api_repr = value.to_api_repr()
        self._set_sub_prop("timePartitioning", api_repr)

    @property
    def clustering_fields(self):
        """Union[List[str], None]: Fields defining clustering for the table
        (Defaults to :data:`None`).

        Clustering fields are immutable after table creation.

        .. note::

           As of 2018-06-29, clustering fields cannot be set on a table
           which does not also have time partitioning defined.
        """
        prop = self._get_sub_prop("clustering")
        if prop is not None:
            return list(prop.get("fields", ()))

    @clustering_fields.setter
    def clustering_fields(self, value):
        """Union[List[str], None]: Fields defining clustering for the table
        (Defaults to :data:`None`).
        """
        if value is not None:
            self._set_sub_prop("clustering", {"fields": value})
        else:
            self._del_sub_prop("clustering")

    @property
    def schema_update_options(self):
        """List[google.cloud.bigquery.job.SchemaUpdateOption]: Specifies
        updates to the destination table schema to allow as a side effect of
        the query job.
        """
        return self._get_sub_prop("schemaUpdateOptions")

    @schema_update_options.setter
    def schema_update_options(self, values):
        self._set_sub_prop("schemaUpdateOptions", values)

    def to_api_repr(self):
        """Build an API representation of the query job config.

        Returns:
            dict: A dictionary in the format used by the BigQuery API.
        """
        resource = copy.deepcopy(self._properties)

        # Query parameters have an additional property associated with them
        # to indicate if the query is using named or positional parameters.
query_parameters = resource["query"].get("queryParameters") if query_parameters: if query_parameters[0].get("name") is None: resource["query"]["parameterMode"] = "POSITIONAL" else: resource["query"]["parameterMode"] = "NAMED" return resource class QueryJob(_AsyncJob): """Asynchronous job: query tables. :type job_id: str :param job_id: the job's ID, within the project belonging to ``client``. :type query: str :param query: SQL query string :type client: :class:`google.cloud.bigquery.client.Client` :param client: A client which holds credentials and project configuration for the dataset (which requires a project). :type job_config: :class:`~google.cloud.bigquery.job.QueryJobConfig` :param job_config: (Optional) Extra configuration options for the query job. """ _JOB_TYPE = "query" _UDF_KEY = "userDefinedFunctionResources" def __init__(self, job_id, query, client, job_config=None): super(QueryJob, self).__init__(job_id, client) if job_config is None: job_config = QueryJobConfig() if job_config.use_legacy_sql is None: job_config.use_legacy_sql = False self.query = query self._configuration = job_config self._query_results = None self._done_timeout = None @property def allow_large_results(self): """See :attr:`google.cloud.bigquery.job.QueryJobConfig.allow_large_results`. """ return self._configuration.allow_large_results @property def create_disposition(self): """See :attr:`google.cloud.bigquery.job.QueryJobConfig.create_disposition`. """ return self._configuration.create_disposition @property def default_dataset(self): """See :attr:`google.cloud.bigquery.job.QueryJobConfig.default_dataset`. """ return self._configuration.default_dataset @property def destination(self): """See :attr:`google.cloud.bigquery.job.QueryJobConfig.destination`. """ return self._configuration.destination @property def destination_encryption_configuration(self): """google.cloud.bigquery.table.EncryptionConfiguration: Custom encryption configuration for the destination table. Custom encryption configuration (e.g., Cloud KMS keys) or :data:`None` if using default encryption. See :attr:`google.cloud.bigquery.job.QueryJobConfig.destination_encryption_configuration`. """ return self._configuration.destination_encryption_configuration @property def dry_run(self): """See :attr:`google.cloud.bigquery.job.QueryJobConfig.dry_run`. """ return self._configuration.dry_run @property def flatten_results(self): """See :attr:`google.cloud.bigquery.job.QueryJobConfig.flatten_results`. """ return self._configuration.flatten_results @property def priority(self): """See :attr:`google.cloud.bigquery.job.QueryJobConfig.priority`. """ return self._configuration.priority @property def query_parameters(self): """See :attr:`google.cloud.bigquery.job.QueryJobConfig.query_parameters`. """ return self._configuration.query_parameters @property def udf_resources(self): """See :attr:`google.cloud.bigquery.job.QueryJobConfig.udf_resources`. """ return self._configuration.udf_resources @property def use_legacy_sql(self): """See :attr:`google.cloud.bigquery.job.QueryJobConfig.use_legacy_sql`. """ return self._configuration.use_legacy_sql @property def use_query_cache(self): """See :attr:`google.cloud.bigquery.job.QueryJobConfig.use_query_cache`. """ return self._configuration.use_query_cache @property def write_disposition(self): """See :attr:`google.cloud.bigquery.job.QueryJobConfig.write_disposition`. 
""" return self._configuration.write_disposition @property def maximum_billing_tier(self): """See :attr:`google.cloud.bigquery.job.QueryJobConfig.maximum_billing_tier`. """ return self._configuration.maximum_billing_tier @property def maximum_bytes_billed(self): """See :attr:`google.cloud.bigquery.job.QueryJobConfig.maximum_bytes_billed`. """ return self._configuration.maximum_bytes_billed @property def table_definitions(self): """See :attr:`google.cloud.bigquery.job.QueryJobConfig.table_definitions`. """ return self._configuration.table_definitions @property def time_partitioning(self): """See :attr:`google.cloud.bigquery.job.QueryJobConfig.time_partitioning`. """ return self._configuration.time_partitioning @property def clustering_fields(self): """See :attr:`google.cloud.bigquery.job.QueryJobConfig.clustering_fields`. """ return self._configuration.clustering_fields @property def schema_update_options(self): """See :attr:`google.cloud.bigquery.job.QueryJobConfig.schema_update_options`. """ return self._configuration.schema_update_options def to_api_repr(self): """Generate a resource for :meth:`_begin`.""" configuration = self._configuration.to_api_repr() resource = { "jobReference": self._properties["jobReference"], "configuration": configuration, } configuration["query"]["query"] = self.query return resource def _copy_configuration_properties(self, configuration): """Helper: assign subclass configuration properties in cleaned.""" self._configuration._properties = copy.deepcopy(configuration) self.query = _helpers._get_sub_prop(configuration, ["query", "query"]) @classmethod def from_api_repr(cls, resource, client): """Factory: construct a job given its API representation :type resource: dict :param resource: dataset job representation returned from the API :type client: :class:`google.cloud.bigquery.client.Client` :param client: Client which holds credentials and project configuration for the dataset. :rtype: :class:`google.cloud.bigquery.job.QueryJob` :returns: Job parsed from ``resource``. """ job_id, config = cls._get_resource_config(resource) query = config["query"]["query"] job = cls(job_id, query, client=client) job._set_properties(resource) return job @property def query_plan(self): """Return query plan from job statistics, if present. See: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.queryPlan :rtype: list of :class:`QueryPlanEntry` :returns: mappings describing the query plan, or an empty list if the query has not yet completed. """ plan_entries = self._job_statistics().get("queryPlan", ()) return [QueryPlanEntry.from_api_repr(entry) for entry in plan_entries] @property def timeline(self): """List(TimelineEntry): Return the query execution timeline from job statistics. """ raw = self._job_statistics().get("timeline", ()) return [TimelineEntry.from_api_repr(entry) for entry in raw] @property def total_bytes_processed(self): """Return total bytes processed from job statistics, if present. See: https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.totalBytesProcessed :rtype: int or None :returns: total bytes processed by the job, or None if job is not yet complete. """ result = self._job_statistics().get("totalBytesProcessed") if result is not None: result = int(result) return result @property def total_bytes_billed(self): """Return total bytes billed from job statistics, if present. 
        See:
        https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.totalBytesBilled

        :rtype: int or None
        :returns: total bytes billed by the job, or None if job is not
                  yet complete.
        """
        result = self._job_statistics().get("totalBytesBilled")
        if result is not None:
            result = int(result)
        return result

    @property
    def billing_tier(self):
        """Return billing tier from job statistics, if present.

        See:
        https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.billingTier

        :rtype: int or None
        :returns: billing tier used by the job, or None if job is not
                  yet complete.
        """
        return self._job_statistics().get("billingTier")

    @property
    def cache_hit(self):
        """Return whether or not query results were served from cache.

        See:
        https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.cacheHit

        :rtype: bool or None
        :returns: whether the query results were returned from cache, or None
                  if job is not yet complete.
        """
        return self._job_statistics().get("cacheHit")

    @property
    def ddl_operation_performed(self):
        """Optional[str]: Return the DDL operation performed.

        See:
        https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.ddlOperationPerformed
        """
        return self._job_statistics().get("ddlOperationPerformed")

    @property
    def ddl_target_table(self):
        """Optional[TableReference]: Return the DDL target table, present
        for CREATE/DROP TABLE/VIEW queries.

        See:
        https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.ddlTargetTable
        """
        prop = self._job_statistics().get("ddlTargetTable")
        if prop is not None:
            prop = TableReference.from_api_repr(prop)
        return prop

    @property
    def num_dml_affected_rows(self):
        """Return the number of DML rows affected by the job.

        See:
        https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.numDmlAffectedRows

        :rtype: int or None
        :returns: number of DML rows affected by the job, or None if job is
                  not yet complete.
        """
        result = self._job_statistics().get("numDmlAffectedRows")
        if result is not None:
            result = int(result)
        return result

    @property
    def slot_millis(self):
        """Union[int, None]: Slot-milliseconds used by this query job."""
        return _helpers._int_or_none(self._job_statistics().get("totalSlotMs"))

    @property
    def statement_type(self):
        """Return statement type from job statistics, if present.

        See:
        https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.statementType

        :rtype: str or None
        :returns: type of statement used by the job, or None if job is not
                  yet complete.
        """
        return self._job_statistics().get("statementType")

    @property
    def referenced_tables(self):
        """Return referenced tables from job statistics, if present.

        See:
        https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.referencedTables

        :rtype: list of :class:`~google.cloud.bigquery.table.TableReference`
        :returns: TableReferences for the tables referenced by the query, or
                  an empty list if the query has not yet completed.
        """
        tables = []
        datasets_by_project_name = {}

        for table in self._job_statistics().get("referencedTables", ()):

            t_project = table["projectId"]

            ds_id = table["datasetId"]
            t_dataset = datasets_by_project_name.get((t_project, ds_id))
            if t_dataset is None:
                t_dataset = DatasetReference(t_project, ds_id)
                datasets_by_project_name[(t_project, ds_id)] = t_dataset

            t_name = table["tableId"]
            tables.append(t_dataset.table(t_name))

        return tables

    @property
    def undeclared_query_parameters(self):
        """Return undeclared query parameters from job statistics, if present.
        See:
        https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.undeclaredQueryParameters

        :rtype:
            list of :class:`~google.cloud.bigquery.ArrayQueryParameter`,
            :class:`~google.cloud.bigquery.ScalarQueryParameter`, or
            :class:`~google.cloud.bigquery.StructQueryParameter`
        :returns: undeclared parameters, or an empty list if the query has
                  not yet completed.
        """
        parameters = []
        undeclared = self._job_statistics().get("undeclaredQueryParameters", ())

        for parameter in undeclared:
            p_type = parameter["parameterType"]

            if "arrayType" in p_type:
                klass = ArrayQueryParameter
            elif "structTypes" in p_type:
                klass = StructQueryParameter
            else:
                klass = ScalarQueryParameter

            parameters.append(klass.from_api_repr(parameter))

        return parameters

    @property
    def estimated_bytes_processed(self):
        """Return the estimated number of bytes processed by the query.

        See:
        https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.query.estimatedBytesProcessed

        :rtype: int or None
        :returns: number of bytes the query is estimated to process, or None
                  if job is not yet complete.
        """
        result = self._job_statistics().get("estimatedBytesProcessed")
        if result is not None:
            result = int(result)
        return result

    def done(self, retry=DEFAULT_RETRY):
        """Refresh the job and check whether it is complete.

        :rtype: bool
        :returns: True if the job is complete, False otherwise.
        """
        # Since the API to getQueryResults can hang up to the timeout value
        # (default of 10 seconds), set the timeout parameter to ensure that
        # the timeout from the futures API is respected. See:
        # https://github.com/GoogleCloudPlatform/google-cloud-python/issues/4135
        timeout_ms = None
        if self._done_timeout is not None:
            # Subtract a buffer for context switching, network latency, etc.
            timeout = self._done_timeout - _TIMEOUT_BUFFER_SECS
            timeout = max(min(timeout, 10), 0)
            self._done_timeout -= timeout
            self._done_timeout = max(0, self._done_timeout)
            timeout_ms = int(timeout * 1000)

        # Do not refresh if the state is already done, as the job will not
        # change once complete.
        if self.state != _DONE_STATE:
            self._query_results = self._client._get_query_results(
                self.job_id,
                retry,
                project=self.project,
                timeout_ms=timeout_ms,
                location=self.location,
            )

            # Only reload the job once we know the query is complete.
            # This will ensure that fields such as the destination table are
            # correctly populated.
            if self._query_results.complete:
                self.reload(retry=retry)

        return self.state == _DONE_STATE

    def _blocking_poll(self, timeout=None):
        self._done_timeout = timeout
        super(QueryJob, self)._blocking_poll(timeout=timeout)

    def result(self, timeout=None, retry=DEFAULT_RETRY):
        """Start the job and wait for it to complete and get the result.

        :type timeout: float
        :param timeout:
            How long (in seconds) to wait for job to complete before raising
            a :class:`concurrent.futures.TimeoutError`.

        :type retry: :class:`google.api_core.retry.Retry`
        :param retry: (Optional) How to retry the call that retrieves rows.

        :rtype: :class:`~google.cloud.bigquery.table.RowIterator`
        :returns:
            Iterator of row data :class:`~google.cloud.bigquery.table.Row`-s.
            During each page, the iterator will have the ``total_rows``
            attribute set, which counts the total number of rows **in the
            result set** (this is distinct from the total number of rows in
            the current page: ``iterator.page.num_items``).

        :raises:
            :class:`~google.cloud.exceptions.GoogleCloudError` if the job
            failed or :class:`concurrent.futures.TimeoutError` if the job did
            not complete in the given timeout.
""" super(QueryJob, self).result(timeout=timeout) # Return an iterator instead of returning the job. if not self._query_results: self._query_results = self._client._get_query_results( self.job_id, retry, project=self.project, location=self.location ) # If the query job is complete but there are no query results, this was # special job, such as a DDL query. Return an empty result set to # indicate success and avoid calling tabledata.list on a table which # can't be read (such as a view table). if self._query_results.total_rows is None: return _EmptyRowIterator() schema = self._query_results.schema dest_table_ref = self.destination dest_table = Table(dest_table_ref, schema=schema) return self._client.list_rows(dest_table, retry=retry) def to_dataframe(self): """Return a pandas DataFrame from a QueryJob Returns: A :class:`~pandas.DataFrame` populated with row data and column headers from the query results. The column headers are derived from the destination table's schema. Raises: ValueError: If the `pandas` library cannot be imported. """ return self.result().to_dataframe() def __iter__(self): return iter(self.result()) class QueryPlanEntryStep(object): """Map a single step in a query plan entry. :type kind: str :param kind: step type :type substeps: :param substeps: names of substeps """ def __init__(self, kind, substeps): self.kind = kind self.substeps = list(substeps) @classmethod def from_api_repr(cls, resource): """Factory: construct instance from the JSON repr. :type resource: dict :param resource: JSON representation of the entry :rtype: :class:`QueryPlanEntryStep` :return: new instance built from the resource """ return cls(kind=resource.get("kind"), substeps=resource.get("substeps", ())) def __eq__(self, other): if not isinstance(other, self.__class__): return NotImplemented return self.kind == other.kind and self.substeps == other.substeps class QueryPlanEntry(object): """QueryPlanEntry represents a single stage of a query execution plan. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs for the underlying API representation within query statistics. """ def __init__(self): self._properties = {} @classmethod def from_api_repr(cls, resource): """Factory: construct instance from the JSON repr. 
        Args:
            resource(Dict[str: object]):
                ExplainQueryStage representation returned from API

        Returns:
            google.cloud.bigquery.QueryPlanEntry:
                Query plan entry parsed from ``resource``
        """
        entry = cls()
        entry._properties = resource
        return entry

    @property
    def name(self):
        """Union[str, None]: Human-readable name of the stage."""
        return self._properties.get("name")

    @property
    def entry_id(self):
        """Union[str, None]: Unique ID for the stage within the plan."""
        return self._properties.get("id")

    @property
    def start(self):
        """Union[Datetime, None]: Datetime when the stage started."""
        if self._properties.get("startMs") is None:
            return None
        return _helpers._datetime_from_microseconds(
            int(self._properties.get("startMs")) * 1000.0
        )

    @property
    def end(self):
        """Union[Datetime, None]: Datetime when the stage ended."""
        if self._properties.get("endMs") is None:
            return None
        return _helpers._datetime_from_microseconds(
            int(self._properties.get("endMs")) * 1000.0
        )

    @property
    def input_stages(self):
        """List(int): Entry IDs for stages that were inputs for this stage."""
        if self._properties.get("inputStages") is None:
            return []
        return [
            _helpers._int_or_none(entry)
            for entry in self._properties.get("inputStages")
        ]

    @property
    def parallel_inputs(self):
        """Union[int, None]: Number of parallel input segments within the
        stage.
        """
        return _helpers._int_or_none(self._properties.get("parallelInputs"))

    @property
    def completed_parallel_inputs(self):
        """Union[int, None]: Number of parallel input segments completed."""
        return _helpers._int_or_none(self._properties.get("completedParallelInputs"))

    @property
    def wait_ms_avg(self):
        """Union[int, None]: Milliseconds the average worker spent waiting to
        be scheduled.
        """
        return _helpers._int_or_none(self._properties.get("waitMsAvg"))

    @property
    def wait_ms_max(self):
        """Union[int, None]: Milliseconds the slowest worker spent waiting to
        be scheduled.
        """
        return _helpers._int_or_none(self._properties.get("waitMsMax"))

    @property
    def wait_ratio_avg(self):
        """Union[float, None]: Ratio of time the average worker spent waiting
        to be scheduled, relative to the longest time spent by any worker in
        any stage of the overall plan.
        """
        return self._properties.get("waitRatioAvg")

    @property
    def wait_ratio_max(self):
        """Union[float, None]: Ratio of time the slowest worker spent waiting
        to be scheduled, relative to the longest time spent by any worker in
        any stage of the overall plan.
        """
        return self._properties.get("waitRatioMax")

    @property
    def read_ms_avg(self):
        """Union[int, None]: Milliseconds the average worker spent reading
        input.
        """
        return _helpers._int_or_none(self._properties.get("readMsAvg"))

    @property
    def read_ms_max(self):
        """Union[int, None]: Milliseconds the slowest worker spent reading
        input.
        """
        return _helpers._int_or_none(self._properties.get("readMsMax"))

    @property
    def read_ratio_avg(self):
        """Union[float, None]: Ratio of time the average worker spent reading
        input, relative to the longest time spent by any worker in any stage
        of the overall plan.
        """
        return self._properties.get("readRatioAvg")

    @property
    def read_ratio_max(self):
        """Union[float, None]: Ratio of time the slowest worker spent reading
        input, relative to the longest time spent by any worker in any stage
        of the overall plan.
        """
        return self._properties.get("readRatioMax")

    @property
    def compute_ms_avg(self):
        """Union[int, None]: Milliseconds the average worker spent on
        CPU-bound processing.
""" return _helpers._int_or_none(self._properties.get("computeMsAvg")) @property def compute_ms_max(self): """Union[int, None]: Milliseconds the slowest worker spent on CPU-bound processing. """ return _helpers._int_or_none(self._properties.get("computeMsMax")) @property def compute_ratio_avg(self): """Union[float, None]: Ratio of time the average worker spent on CPU-bound processing, relative to the longest time spent by any worker in any stage of the overall plan. """ return self._properties.get("computeRatioAvg") @property def compute_ratio_max(self): """Union[float, None]: Ratio of time the slowest worker spent on CPU-bound processing, relative to the longest time spent by any worker in any stage of the overall plan. """ return self._properties.get("computeRatioMax") @property def write_ms_avg(self): """Union[int, None]: Milliseconds the average worker spent writing output data. """ return _helpers._int_or_none(self._properties.get("writeMsAvg")) @property def write_ms_max(self): """Union[int, None]: Milliseconds the slowest worker spent writing output data. """ return _helpers._int_or_none(self._properties.get("writeMsMax")) @property def write_ratio_avg(self): """Union[float, None]: Ratio of time the average worker spent writing output data, relative to the longest time spent by any worker in any stage of the overall plan. """ return self._properties.get("writeRatioAvg") @property def write_ratio_max(self): """Union[float, None]: Ratio of time the slowest worker spent writing output data, relative to the longest time spent by any worker in any stage of the overall plan. """ return self._properties.get("writeRatioMax") @property def records_read(self): """Union[int, None]: Number of records read by this stage.""" return _helpers._int_or_none(self._properties.get("recordsRead")) @property def records_written(self): """Union[int, None]: Number of records written by this stage.""" return _helpers._int_or_none(self._properties.get("recordsWritten")) @property def status(self): """Union[str, None]: status of this stage.""" return self._properties.get("status") @property def shuffle_output_bytes(self): """Union[int, None]: Number of bytes written by this stage to intermediate shuffle. """ return _helpers._int_or_none(self._properties.get("shuffleOutputBytes")) @property def shuffle_output_bytes_spilled(self): """Union[int, None]: Number of bytes written by this stage to intermediate shuffle and spilled to disk. """ return _helpers._int_or_none(self._properties.get("shuffleOutputBytesSpilled")) @property def steps(self): """List(QueryPlanEntryStep): List of step operations performed by each worker in the stage. """ return [ QueryPlanEntryStep.from_api_repr(step) for step in self._properties.get("steps", []) ] class TimelineEntry(object): """TimelineEntry represents progress of a query job at a particular point in time. See https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs for the underlying API representation within query statistics. """ def __init__(self): self._properties = {} @classmethod def from_api_repr(cls, resource): """Factory: construct instance from the JSON repr. 
Args: resource(Dict[str: object]): QueryTimelineSample representation returned from API Returns: google.cloud.bigquery.TimelineEntry: Timeline sample parsed from ``resource`` """ entry = cls() entry._properties = resource return entry @property def elapsed_ms(self): """Union[int, None]: Milliseconds elapsed since start of query execution.""" return _helpers._int_or_none(self._properties.get("elapsedMs")) @property def active_units(self): """Union[int, None]: Current number of input units being processed by workers, reported as largest value since the last sample.""" return _helpers._int_or_none(self._properties.get("activeUnits")) @property def pending_units(self): """Union[int, None]: Current number of input units remaining for query stages active at this sample time.""" return _helpers._int_or_none(self._properties.get("pendingUnits")) @property def completed_units(self): """Union[int, None]: Current number of input units completed by this query.""" return _helpers._int_or_none(self._properties.get("completedUnits")) @property def slot_millis(self): """Union[int, None]: Cumulative slot-milliseconds consumed by this query.""" return _helpers._int_or_none(self._properties.get("totalSlotMs")) class UnknownJob(_AsyncJob): """A job whose type cannot be determined.""" @classmethod def from_api_repr(cls, resource, client): """Construct an UnknownJob from the JSON representation. Args: resource (dict): JSON representation of a job. client (google.cloud.bigquery.client.Client): Client connected to BigQuery API. Returns: UnknownJob: Job corresponding to the resource. """ job_ref_properties = resource.get("jobReference", {"projectId": client.project}) job_ref = _JobReference._from_api_repr(job_ref_properties) job = cls(job_ref, client) # Populate the job reference with the project, even if it has been # redacted, because we know it should equal that of the request. resource["jobReference"] = job_ref_properties job._properties = resource return job
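# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a round-trip through
# QueryJobConfig's API representation, using only classes defined above. No
# network access or credentials are needed; the assertions document how the
# setters serialize values (note that maximumBytesBilled is stored as a
# string in the API payload).
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    config = QueryJobConfig()
    config.use_legacy_sql = False
    config.maximum_bytes_billed = 10 * 1024 ** 3

    resource = config.to_api_repr()
    assert resource["query"]["useLegacySql"] is False
    assert resource["query"]["maximumBytesBilled"] == str(10 * 1024 ** 3)

    restored = QueryJobConfig.from_api_repr(resource)
    assert restored.maximum_bytes_billed == 10 * 1024 ** 3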
test_artificial_128_Difference_PolyTrend_12_12_20.py
import pyaf.Bench.TS_datasets as tsds import tests.artificial.process_artificial_dataset as art
art.process_dataset(N=128, FREQ='D', seed=0, trendtype="PolyTrend", cycle_length=12, transform="Difference", sigma=0.0, exog_count=20, ar_order=12)
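# Hedged note (not part of the generated test): these artificial-dataset
# tests are parameter sweeps over the same helper, so a sibling test would
# typically differ only in the keyword arguments used above, e.g. a higher
# noise level:
#
#     art.process_dataset(N=128, FREQ='D', seed=0, trendtype="PolyTrend",
#                         cycle_length=12, transform="Difference",
#                         sigma=0.1, exog_count=20, ar_order=12)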
mod.rs
pub mod gpio; pub mod rcc; pub mod dma;
pub mod usart; pub mod sys;
complete_authenticate.rs
use crate::{ crypto::{Rc4, HASH_SIZE}, sspi::{ self, ntlm::{ messages::{ computations::*, CLIENT_SEAL_MAGIC, CLIENT_SIGN_MAGIC, SERVER_SEAL_MAGIC, SERVER_SIGN_MAGIC, }, Mic, NegotiateFlags, Ntlm, NtlmState, MESSAGE_INTEGRITY_CHECK_SIZE, SESSION_KEY_SIZE, }, SecurityStatus, }, }; pub fn complete_authenticate(mut context: &mut Ntlm) -> sspi::Result<SecurityStatus> { check_state(context.state)?; let negotiate_message = context .negotiate_message .as_ref() .expect("negotiate message must be set on negotiate phase"); let challenge_message = context .challenge_message .as_ref() .expect("challenge message must be set on challenge phase"); let authenticate_message = context .authenticate_message .as_ref() .expect("authenticate message must be set on authenticate phase"); let ntlm_v2_hash = compute_ntlm_v2_hash( context .identity .as_ref() .expect("Identity must be present on complete_authenticate phase"), )?; let (_, key_exchange_key) = compute_ntlm_v2_response( authenticate_message.client_challenge.as_ref(), challenge_message.server_challenge.as_ref(), authenticate_message.target_info.as_ref(), ntlm_v2_hash.as_ref(), challenge_message.timestamp, )?; let session_key = get_session_key( key_exchange_key, authenticate_message.encrypted_random_session_key.as_ref(), context.flags, )?; context.send_signing_key = generate_signing_key(session_key.as_ref(), SERVER_SIGN_MAGIC); context.recv_signing_key = generate_signing_key(session_key.as_ref(), CLIENT_SIGN_MAGIC); context.send_sealing_key = Some(Rc4::new(&generate_signing_key( session_key.as_ref(), SERVER_SEAL_MAGIC, )));
        CLIENT_SEAL_MAGIC,
    )));

    check_mic_correctness(
        negotiate_message.message.as_ref(),
        challenge_message.message.as_ref(),
        authenticate_message.message.as_ref(),
        &authenticate_message.mic,
        session_key.as_ref(),
    )?;

    context.state = NtlmState::Final;

    Ok(SecurityStatus::Ok)
}

fn check_state(state: NtlmState) -> sspi::Result<()> {
    if state != NtlmState::Completion {
        Err(sspi::Error::new(
            sspi::ErrorKind::OutOfSequence,
            String::from("Complete authenticate was fired but the state is not a Completion"),
        ))
    } else {
        Ok(())
    }
}

fn check_mic_correctness(
    negotiate_message: &[u8],
    challenge_message: &[u8],
    authenticate_message: &[u8],
    mic: &Option<Mic>,
    exported_session_key: &[u8],
) -> sspi::Result<()> {
    if mic.is_some() {
        // The client computes the MIC over the authenticate message with the
        // MIC field zeroed out ([0x00; 16] in place of the real value). To
        // verify it, we zero the MIC region of our copy of the authenticate
        // message, recompute the MIC, and compare it with the received value.
        let mic = mic.as_ref().unwrap();
        let mut authenticate_message = authenticate_message.to_vec();
        authenticate_message
            [mic.offset as usize..mic.offset as usize + MESSAGE_INTEGRITY_CHECK_SIZE]
            .clone_from_slice(&[0x00; MESSAGE_INTEGRITY_CHECK_SIZE]);

        let calculated_mic = compute_message_integrity_check(
            negotiate_message,
            challenge_message,
            authenticate_message.as_ref(),
            exported_session_key,
        )?;

        if mic.value != calculated_mic {
            return Err(sspi::Error::new(
                sspi::ErrorKind::MessageAltered,
                String::from("Message Integrity Check (MIC) verification failed!"),
            ));
        }
    }

    Ok(())
}

fn get_session_key(
    key_exchange_key: [u8; HASH_SIZE],
    encrypted_random_session_key: &[u8],
    flags: NegotiateFlags,
) -> sspi::Result<[u8; SESSION_KEY_SIZE]> {
    let session_key = if flags.contains(NegotiateFlags::NTLM_SSP_NEGOTIATE_KEY_EXCH) {
        let mut session_key = [0x00; SESSION_KEY_SIZE];
        session_key.clone_from_slice(
            Rc4::new(key_exchange_key.as_ref())
                .process(encrypted_random_session_key)
                .as_slice(),
        );

        session_key
    } else {
        key_exchange_key
    };

    Ok(session_key)
}
context.recv_sealing_key = Some(Rc4::new(&generate_signing_key( session_key.as_ref(),
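// ---------------------------------------------------------------------------
// Hedged sketch (not part of the module): the MIC verification above relies
// on one pattern worth isolating: the MIC is always computed over the
// authenticate message with its 16-byte MIC field zeroed. A minimal
// standalone version of that zeroing step, with no NTLM types involved:
//
//     fn zero_mic_field(message: &mut [u8], offset: usize) {
//         const MIC_LEN: usize = 16; // MESSAGE_INTEGRITY_CHECK_SIZE above
//         message[offset..offset + MIC_LEN].copy_from_slice(&[0u8; MIC_LEN]);
//     }
// ---------------------------------------------------------------------------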
stats_api.go
// Copyright (c) 2018 Cisco and/or its affiliates. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "flag" "fmt" "log" "os" "strings" "git.fd.io/govpp.git/adapter" "git.fd.io/govpp.git/adapter/vppapiclient" "git.fd.io/govpp.git/core" ) // ------------------------------------------------------------------ // Example - Stats API // ------------------------------------------------------------------ // The example stats_api demonstrates how to retrieve stats // from the VPP using the new stats API. // ------------------------------------------------------------------ var ( statsSocket = flag.String("socket", vppapiclient.DefaultStatSocket, "VPP stats segment socket") dumpAll = flag.Bool("all", false, "Dump all stats including ones with zero values") ) func init()
func main() { flag.Parse() skipZeros := !*dumpAll cmd := flag.Arg(0) switch cmd { case "", "ls", "dump", "errors", "interfaces", "nodes", "system", "buffers": default: flag.Usage() } var patterns []string if flag.NArg() > 0 { patterns = flag.Args()[1:] } client := vppapiclient.NewStatClient(*statsSocket) fmt.Printf("Connecting to stats socket: %s\n", *statsSocket) c, err := core.ConnectStats(client) if err != nil { log.Fatalln("Connecting failed:", err) } defer c.Disconnect() switch cmd { case "system": stats, err := c.GetSystemStats() if err != nil { log.Fatalln("getting system stats failed:", err) } fmt.Printf("System stats: %+v\n", stats) case "nodes": fmt.Println("Listing node stats..") stats, err := c.GetNodeStats() if err != nil { log.Fatalln("getting node stats failed:", err) } for _, node := range stats.Nodes { if node.Calls == 0 && node.Suspends == 0 && node.Clocks == 0 && node.Vectors == 0 && skipZeros { continue } fmt.Printf(" - %+v\n", node) } fmt.Printf("Listed %d node counters\n", len(stats.Nodes)) case "interfaces": fmt.Println("Listing interface stats..") stats, err := c.GetInterfaceStats() if err != nil { log.Fatalln("getting interface stats failed:", err) } for _, iface := range stats.Interfaces { fmt.Printf(" - %+v\n", iface) } fmt.Printf("Listed %d interface counters\n", len(stats.Interfaces)) case "errors": fmt.Printf("Listing error stats.. %s\n", strings.Join(patterns, " ")) stats, err := c.GetErrorStats(patterns...) if err != nil { log.Fatalln("getting error stats failed:", err) } n := 0 for _, counter := range stats.Errors { if counter.Value == 0 && skipZeros { continue } fmt.Printf(" - %v\n", counter) n++ } fmt.Printf("Listed %d (%d) error counters\n", n, len(stats.Errors)) case "buffers": stats, err := c.GetBufferStats() if err != nil { log.Fatalln("getting buffer stats failed:", err) } fmt.Printf("Buffer stats: %+v\n", stats) case "dump": dumpStats(client, patterns, skipZeros) default: listStats(client, patterns) } } func listStats(client adapter.StatsAPI, patterns []string) { fmt.Printf("Listing stats.. %s\n", strings.Join(patterns, " ")) list, err := client.ListStats(patterns...) if err != nil { log.Fatalln("listing stats failed:", err) } for _, stat := range list { fmt.Printf(" - %v\n", stat) } fmt.Printf("Listed %d stats\n", len(list)) } func dumpStats(client adapter.StatsAPI, patterns []string, skipZeros bool) { fmt.Printf("Dumping stats.. %s\n", strings.Join(patterns, " ")) stats, err := client.DumpStats(patterns...) if err != nil { log.Fatalln("dumping stats failed:", err) } n := 0 for _, stat := range stats { if isZero(stat.Data) && skipZeros { continue } fmt.Printf(" - %-25s %25v %+v\n", stat.Name, stat.Type, stat.Data) n++ } fmt.Printf("Dumped %d (%d) stats\n", n, len(stats)) } func isZero(stat adapter.Stat) bool { switch s := stat.(type) { case adapter.ScalarStat: return s == 0 case adapter.ErrorStat: return s == 0 case adapter.SimpleCounterStat: for _, ss := range s { for _, sss := range ss { if sss != 0 { return false } } } return true case adapter.CombinedCounterStat: for _, ss := range s { for _, sss := range ss { if sss.Bytes != 0 || sss.Packets != 0 { return false } } } return true } return false }
{ flag.Usage = func() { fmt.Fprintf(os.Stderr, "%s: usage [ls|dump|errors|interfaces|nodes|system|buffers] <patterns>...\n", os.Args[0]) flag.PrintDefaults() os.Exit(1) } }
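// ---------------------------------------------------------------------------
// Hedged usage note (not part of the original example): typical invocations,
// assuming the example is built as a `stats_api` binary. The pattern strings
// below are placeholders; their exact semantics depend on the adapter's
// StatsAPI implementation.
//
//	stats_api ls /if                          list stats matching "/if"
//	stats_api dump /err                       dump matching stats, skipping zeros
//	stats_api -all dump /err                  include zero-valued stats
//	stats_api -socket /run/vpp/stats.sock system
// ---------------------------------------------------------------------------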
message.py
import logging from eth_typing import Address from eth.abc import MessageAPI from eth.constants import ( CREATE_CONTRACT_ADDRESS, ) from eth.typing import ( BytesOrView, ) from eth.validation import ( validate_canonical_address, validate_is_bytes, validate_is_bytes_or_view, validate_is_integer, validate_gte, validate_uint256, validate_is_boolean, ) class Message(MessageAPI): __slots__ = [ 'to', 'sender', 'value', 'data', 'depth', 'gas', 'code', '_code_address', 'create_address', 'should_transfer_value', 'is_static', '_storage_address' ] logger = logging.getLogger('eth.vm.message.Message') def __init__(self, gas: int, to: Address, sender: Address, value: int, data: BytesOrView, code: bytes, depth: int = 0, create_address: Address = None, code_address: Address = None, should_transfer_value: bool = True, is_static: bool = False) -> None: validate_uint256(gas, title="Message.gas") self.gas: int = gas if to != CREATE_CONTRACT_ADDRESS: validate_canonical_address(to, title="Message.to") self.to = to validate_canonical_address(sender, title="Message.sender") self.sender = sender validate_uint256(value, title="Message.value") self.value = value validate_is_bytes_or_view(data, title="Message.data") self.data = data validate_is_integer(depth, title="Message.depth") validate_gte(depth, minimum=0, title="Message.depth") self.depth = depth validate_is_bytes(code, title="Message.code") self.code = code if create_address is not None: validate_canonical_address(create_address, title="Message.storage_address") self.storage_address = create_address if code_address is not None: validate_canonical_address(code_address, title="Message.code_address") self.code_address = code_address validate_is_boolean(should_transfer_value, title="Message.should_transfer_value") self.should_transfer_value = should_transfer_value validate_is_boolean(is_static, title="Message.is_static") self.is_static = is_static @property def code_address(self) -> Address: if self._code_address is not None: return self._code_address else: return self.to @code_address.setter def code_address(self, value: Address) -> None: self._code_address = value @property def storage_address(self) -> Address: if self._storage_address is not None: return self._storage_address else: return self.to @storage_address.setter def storage_address(self, value: Address) -> None:
@property def is_create(self) -> bool: return self.to == CREATE_CONTRACT_ADDRESS @property def data_as_bytes(self) -> bytes: return bytes(self.data)
self._storage_address = value
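# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module): constructing a Message for a
# plain value transfer. The 20-byte addresses are placeholders; the block is
# guarded so that importing the module stays side-effect free.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    msg = Message(
        gas=21000,
        to=Address(b"\x01" * 20),
        sender=Address(b"\x02" * 20),
        value=10 ** 18,
        data=b"",
        code=b"",
    )
    assert not msg.is_create                 # `to` is a real address, not CREATE
    assert msg.storage_address == msg.to     # defaults to `to` when unset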
check_gir.rs
// Copyright 2013-2018, The Gtk-rs Project Developers. // See the COPYRIGHT file at the top-level directory of this distribution. // Licensed under the MIT license, see the LICENSE file or <https://opensource.org/licenses/MIT> #[test] fn check_gir_file() {
let res = gir_format_check::check_gir_file("Gir.toml"); println!("{}", res.to_string()); assert_eq!(res.nb_errors, 0); }
test_main.py
from __future__ import (absolute_import, print_function, unicode_literals, division) from .context import gragrapy as gg from gragrapy.__main__ import parse_kwargs def test_parse_kwargs():
assert parse_kwargs(['a=b', 'c=d']) == {'a': 'b', 'c': 'd'} assert parse_kwargs(['a=1', 'c=-5']) == {'a': 1, 'c': -5} assert parse_kwargs(['a=b=c', 'c=d']) == {'a': 'b=c', 'c': 'd'}
assert parse_kwargs([]) == {}
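# A minimal sketch consistent with the assertions above; the actual
# implementation lives in gragrapy.__main__ and may differ. Illustrative only:
def _parse_kwargs_sketch(args):
    out = {}
    for arg in args:
        key, value = arg.split('=', 1)  # split on the first '=' only
        try:
            value = int(value)          # numeric strings become ints
        except ValueError:
            pass
        out[key] = value
    return out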
getViewByScope.go
// *** WARNING: this file was generated by the Pulumi SDK Generator. *** // *** Do not edit by hand unless you're certain you know what you are doing! *** package v20191101 import ( "github.com/pulumi/pulumi/sdk/v3/go/pulumi" ) // States and configurations of Cost Analysis. func LookupViewByScope(ctx *pulumi.Context, args *LookupViewByScopeArgs, opts ...pulumi.InvokeOption) (*LookupViewByScopeResult, error)
type LookupViewByScopeArgs struct { // The scope associated with view operations. This includes 'subscriptions/{subscriptionId}' for subscription scope, 'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}' for resourceGroup scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}' for Billing Account scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/departments/{departmentId}' for Department scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/enrollmentAccounts/{enrollmentAccountId}' for EnrollmentAccount scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}' for BillingProfile scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/invoiceSections/{invoiceSectionId}' for InvoiceSection scope, 'providers/Microsoft.Management/managementGroups/{managementGroupId}' for Management Group scope, 'providers/Microsoft.CostManagement/externalBillingAccounts/{externalBillingAccountName}' for External Billing Account scope and 'providers/Microsoft.CostManagement/externalSubscriptions/{externalSubscriptionName}' for External Subscription scope. Scope string `pulumi:"scope"` // View name ViewName string `pulumi:"viewName"` } // States and configurations of Cost Analysis. type LookupViewByScopeResult struct { // Show costs accumulated over time. Accumulated *string `pulumi:"accumulated"` // Chart type of the main view in Cost Analysis. Required. Chart *string `pulumi:"chart"` // Date the user created this view. CreatedOn string `pulumi:"createdOn"` // Selected currency. Currency string `pulumi:"currency"` // Has definition for data in this report config. DataSet *ReportConfigDatasetResponse `pulumi:"dataSet"` // Selected date range for viewing cost in. DateRange string `pulumi:"dateRange"` // User input name of the view. Required. DisplayName *string `pulumi:"displayName"` // eTag of the resource. To handle concurrent update scenario, this field will be used to determine whether the user is updating the latest version or not. ETag *string `pulumi:"eTag"` // Resource Id. Id string `pulumi:"id"` // Include monetary commitment IncludeMonetaryCommitment bool `pulumi:"includeMonetaryCommitment"` // List of KPIs to show in Cost Analysis UI. Kpis []KpiPropertiesResponse `pulumi:"kpis"` // Metric to use when displaying costs. Metric *string `pulumi:"metric"` // Date when the user last modified this view. ModifiedOn string `pulumi:"modifiedOn"` // Resource name. Name string `pulumi:"name"` // Configuration of 3 sub-views in the Cost Analysis UI. Pivots []PivotPropertiesResponse `pulumi:"pivots"` // Cost Management scope to save the view on. 
This includes 'subscriptions/{subscriptionId}' for subscription scope, 'subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}' for resourceGroup scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}' for Billing Account scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/departments/{departmentId}' for Department scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/enrollmentAccounts/{enrollmentAccountId}' for EnrollmentAccount scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}' for BillingProfile scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/invoiceSections/{invoiceSectionId}' for InvoiceSection scope, 'providers/Microsoft.Management/managementGroups/{managementGroupId}' for Management Group scope, '/providers/Microsoft.CostManagement/externalBillingAccounts/{externalBillingAccountName}' for ExternalBillingAccount scope, and '/providers/Microsoft.CostManagement/externalSubscriptions/{externalSubscriptionName}' for ExternalSubscription scope. Scope *string `pulumi:"scope"` // Has time period for pulling data for the report. TimePeriod *ReportConfigTimePeriodResponse `pulumi:"timePeriod"` // The time frame for pulling data for the report. If custom, then a specific time period must be provided. Timeframe string `pulumi:"timeframe"` // Resource type. Type string `pulumi:"type"` }
{ var rv LookupViewByScopeResult err := ctx.Invoke("azure-native:costmanagement/v20191101:getViewByScope", args, &rv, opts...) if err != nil { return nil, err } return &rv, nil }
asserts.go
// Package assert is a lightweight framework of common assertions to make unit testing easier and readable
package assert

import (
	"reflect"
	"strings"
	"testing"
)

// kindFormatters is a map of assignments of data types and their formatters.
var kindFormatters = map[reflect.Kind]string{
	reflect.Uint:       "%d",
	reflect.Uint8:      "%d",
	reflect.Uint16:     "%d",
	reflect.Uint32:     "%d",
	reflect.Uint64:     "%d",
	reflect.Int:        "%d",
	reflect.Int8:       "%d",
	reflect.Int16:      "%d",
	reflect.Int32:      "%d",
	reflect.Int64:      "%d",
	reflect.Float32:    "%f",
	reflect.Float64:    "%f",
	reflect.Complex64:  "%g",
	reflect.Complex128: "%g",
	reflect.Bool:       "%t",
	reflect.String:     "%s",
	reflect.Slice:      "%#v",
	reflect.Array:      "%#v",
	reflect.Ptr:        "%#v",
}

// Equals asserts the equality of the given arguments
//
//	assert.Equals(t, 111, 111)
//
// Returns true if the arguments are equal and false if different.
func Equals(t *testing.T, exp interface{}, act interface{}) (result bool) {
	result = isEqual(exp, act)
	if !result {
		error(t, "Values should be equal. Expected: <%e>, but was: <%a>", exp, act)
	}
	return
}

// NotEquals asserts the non-equality of the given arguments
//
//	assert.NotEquals(t, 111, 222)
//
// Returns true if the arguments are not equal and false if equal.
func NotEquals(t *testing.T, exp interface{}, act interface{}) (result bool) {
	result = !isEqual(exp, act)
	if !result {
		error(t, "Values should be different. Expected: <%e>, but was <%a>", exp, act)
	}
	return
}

// IsNil asserts the given argument is nil
//
//	assert.IsNil(t, nil)
//
// Returns true if the argument is nil and false if initialized to something.
func IsNil(t *testing.T, v interface{}) bool {
	if v != nil {
		t.Errorf("Expected nil, but was: <%v>", v)
		return false
	}
	return true
}

// IsNotNil asserts the given argument is initialized
//
//	assert.IsNotNil(t, 1)
//
// Returns true if the argument is initialized and false if nil.
func IsNotNil(t *testing.T, v interface{}) bool {
	if v == nil {
		t.Errorf("Expected not nil, but was: <%v>", v)
		return false
	}
	return true
}

// IsTrue asserts the given bool argument is true
//
//	assert.IsTrue(t, true)
//
// Returns the argument's value
func IsTrue(t *testing.T, v bool) bool {
	if !v {
		t.Errorf("Expected true, but was: <%v>", v)
	}
	return v
}

// IsFalse asserts the given bool argument is false
//
//	assert.IsFalse(t, false)
//
// Returns the negated value of the argument
func IsFalse(t *testing.T, v bool) bool {
	if v {
		t.Errorf("Expected false, but was: <%v>", v)
	}
	return !v
}

// error uses the formatter map to display the given error message with readable formatting.
// The %e is a placeholder for the first argument and %a is the second.
//
//	error(t, "Error comparing %e with %a", "test", true)
//
func error(t *testing.T, msg string, exp interface{}, act interface{}) {
	expT := reflect.TypeOf(exp)
	actT := reflect.TypeOf(act)
	expF := getTypeFormat(expT)
	actF := getTypeFormat(actT)
	msg = strings.Replace(msg, "%e", expF, 1)
	msg = strings.Replace(msg, "%a", actF, 1)
	t.Errorf(msg, exp, act)
}

// isEqual is an internal function to check the equality of the given arguments. Only uses DeepEqual when either argument is a slice.
//
//	isEqual(111, 111)
//
// Returns true if the arguments are equal and false if different.
func isEqual(exp interface{}, act interface{}) (result bool) {
	expT := reflect.TypeOf(exp)
	actT := reflect.TypeOf(act)
	if expT.Kind() != reflect.Slice && actT.Kind() != reflect.Slice {
		result = exp == act
	} else {
		result = reflect.DeepEqual(act, exp)
	}
	return
}

// getTypeFormat is an internal function to provide the formatter string for the given data type.
// // getTypeFormat(reflect.Uint) // // Returns the formatter string assigned through the formatter map func getTypeFormat(t reflect.Type) (result string) { result, ok := kindFormatters[t.Kind()] if !ok
return result }
{ result = "%s" }
service.module.ts
import { Module } from '@nestjs/common';
import { MongooseModule } from '@nestjs/mongoose';
import { ServiceController } from './service.controller';
import { Service, ServiceSchema } from './service.schema';
import { ServiceService } from './service.service';

// Register the schema once and reuse the same dynamic module for both the
// import and the re-export, instead of calling forFeature() twice.
const serviceMongooseFeature = MongooseModule.forFeature([
  { name: Service.name, schema: ServiceSchema },
]);

@Module({
  imports: [serviceMongooseFeature],
  exports: [serviceMongooseFeature],
  controllers: [ServiceController],
  providers: [ServiceService],
})
export class
{}
ServiceModule
test.ts
import { Rhum } from "https://deno.land/x/[email protected]/mod.ts";
import { hasFileExtension } from "../utils.ts";
import { create, createChallenge, createVerifier, cstring } from "../mod.ts";

interface CodePair {
  codeVerifier: string;
  codeChallenge: string;
}

Rhum.testPlan("test.ts", () => {
  Rhum.testSuite("hasFileExtension()", () => {
    Rhum.testCase("should validate a filename correctly", () => {
      Rhum.asserts.assertEquals(hasFileExtension("mod.ts", "ts"), true);
      Rhum.asserts.assertEquals(hasFileExtension("mod.ts", "js"), false);
      Rhum.asserts.assertEquals(hasFileExtension("mod", "ts"), false);
      Rhum.asserts.assertEquals(hasFileExtension("mod", "js"), false);
    });
  });

  Rhum.testSuite("create()", () => {
    Rhum.testCase("should have the correct length with the minimum value", () => {
      const codePair: CodePair = create(43);
      Rhum.asserts.assertEquals(43, codePair.codeChallenge.length);
      Rhum.asserts.assertEquals(43, codePair.codeVerifier.length);
    });

    Rhum.testCase("should have the correct length with a bigger value", () => {
      const codePair: CodePair = create(128);
      Rhum.asserts.assertEquals(43, codePair.codeChallenge.length);
      Rhum.asserts.assertEquals(128, codePair.codeVerifier.length);
    });

    Rhum.testCase("should not create a pair for a value bigger than allowed", () => {
      let codePair: CodePair = { codeVerifier: "", codeChallenge: "" };
      try {
        codePair = create(129);
      } catch (_err) {
        // do nothing; we don't care that it throws
      }
      Rhum.asserts.assertEquals(codePair, {
        codeVerifier: "",
        codeChallenge: "",
      });
    });
  });

  Rhum.testSuite("cstring()", () => {
    Rhum.testCase("should return a string of the given length", () => {
      const randomVal: string = cstring(12);
      Rhum.asserts.assertEquals(12, randomVal.length);
    });

    Rhum.testCase("should only include a given set of characters", () => {
      const randomVal: string = cstring(200);
      const result: RegExpMatchArray =
        randomVal.match(/^([A-Za-z0-9-._~]*)$/g) || [];
      Rhum.asserts.assertEquals(randomVal, result[0]);
    });
  });

  Rhum.testSuite("createVerifier()", () => {
    Rhum.testCase("should return a valid verifier", () => {
      const randomVal: string = createVerifier(127);
      const result: RegExpMatchArray =
        randomVal.match(/^([A-Za-z0-9-._~]*)$/g) || [];
      Rhum.asserts.assertEquals(randomVal, result[0]);
    });
  });

  Rhum.testSuite("createChallenge()", () => {
    Rhum.testCase("should return a valid challenge", () => {
      const challenge =
        "IdBg3S0AWzKmrkcvzhK.biD4XudSU0mq1K0gGAalwG66FTmMKOqP7YUcWlRVAEzKmiMHne0sw6MxE6uYHBMHggJ7uWFsvRWQ61v1WNEn4IZ7kHrei6CfuZaIh2rBXGiP";
      const randomVal: string = createChallenge(challenge);
      const result: RegExpMatchArray =
        randomVal.match(/^([A-Za-z0-9-._~]*)$/g) || [];
      Rhum.asserts.assertEquals(randomVal, result[0]);
      Rhum.asserts.assertEquals(
        "gv-7aFUDVY2IoAEXWNtN5Kq1RmkJ21gcRq4DeQZpx1U",
        randomVal,
      );
    });
  });
});
Rhum.run();
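# Cross-language sketch (hedged): the transform these tests appear to exercise is
# RFC 7636 S256 — BASE64URL(SHA-256(verifier)) without padding. If createChallenge()
# implements S256, this standalone Python snippet reproduces the expected challenge
# asserted above; that equivalence is an assumption, not verified here.
import base64
import hashlib

verifier = (
    "IdBg3S0AWzKmrkcvzhK.biD4XudSU0mq1K0gGAalwG66FTmMKOqP7YUcWlRVAEzK"
    "miMHne0sw6MxE6uYHBMHggJ7uWFsvRWQ61v1WNEn4IZ7kHrei6CfuZaIh2rBXGiP"
)
digest = hashlib.sha256(verifier.encode("ascii")).digest()
challenge = base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")
print(challenge)  # expected (if S256): gv-7aFUDVY2IoAEXWNtN5Kq1RmkJ21gcRq4DeQZpx1U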
sitemaps.py
from django.contrib.sitemaps import Sitemap from Blog.models import Post class BlogSitemap(Sitemap): changefreq = "weekly" priority = 0.5 def items(self): return Post.objects.filter(status=True) def lastmod(self, obj):
return obj.published_date
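# Usage sketch (hedged): wiring BlogSitemap into a project urls.py. The import path
# "Blog.sitemaps" and the "blog" section key are illustrative assumptions;
# "django.contrib.sitemaps" must be in INSTALLED_APPS for the view to render.
from django.contrib.sitemaps.views import sitemap
from django.urls import path

from Blog.sitemaps import BlogSitemap

sitemaps = {"blog": BlogSitemap}

urlpatterns = [
    path(
        "sitemap.xml",
        sitemap,
        {"sitemaps": sitemaps},
        name="django.contrib.sitemaps.views.sitemap",
    ),
]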
index.js
import React, { useState, useEffect } from 'react'
import PropTypes from 'prop-types'
import Radio from '@material-ui/core/Radio'
import RadioGroup from '@material-ui/core/RadioGroup'
import FormControlLabel from '@material-ui/core/FormControlLabel'

const Bool = props => {
  const [value, setValue] = useState(props.dflt)

  useEffect(() => {
    if (props.value !== value) setValue(props.value)
  }, [props.value])

  // Normalize the incoming value once; callers may pass a boolean, the
  // string 'true', or the number 1. Toggling the raw value would misbehave
  // for the string and number forms.
  const isTrue = value === true || value === 'true' || value === 1

  const toggle = () => {
    props.updateValue(props.name, !isTrue)
    setValue(!isTrue)
  }

  return (
    <RadioGroup onChange={toggle} value={JSON.stringify(isTrue)}>
      <FormControlLabel
        control={<Radio color="primary" />}
        value="false"
        checked={!isTrue}
        label={props.labels[0]}
        className="po-list-item"
      />
      <FormControlLabel
        control={<Radio color="primary" />}
        value="true"
        checked={isTrue}
/>
    </RadioGroup>
  )
}

Bool.propTypes = {
  dflt: PropTypes.bool,
  value: PropTypes.oneOfType([PropTypes.bool, PropTypes.string, PropTypes.number]),
  labels: PropTypes.array,
  updateValue: PropTypes.func.isRequired,
  name: PropTypes.string.isRequired
}

Bool.defaultProps = {
  dflt: false,
  labels: ['false', 'true']
}

export default Bool
label={props.labels[1]} className="po-list-item"
wcs_utils.py
import numpy as np

import astropy.units as u
import astropy.wcs.utils
from astropy.coordinates import (
    ITRS,
    BaseCoordinateFrame,
    CartesianRepresentation,
    SkyCoord,
    SphericalRepresentation,
)
from astropy.wcs import WCS

from sunpy import log
from .frames import (
    Heliocentric,
    HeliographicCarrington,
    HeliographicStonyhurst,
    Helioprojective,
    SunPyBaseCoordinateFrame,
)

__all__ = ['solar_wcs_frame_mapping', 'solar_frame_to_wcs_mapping']

try:
    # TODO: Remove vendored version after Astropy 5.0
    from astropy.wcs.utils import obsgeo_to_frame
except ImportError:
    def obsgeo_to_frame(obsgeo, obstime):
        """
        Convert a WCS obsgeo property into an `~.builtin_frames.ITRS` coordinate frame.

        Parameters
        ----------
        obsgeo : array-like
            A shape ``(6, )`` array representing ``OBSGEO-[XYZ], OBSGEO-[BLH]`` as
            returned by ``WCS.wcs.obsgeo``.
        obstime : time-like
            The time associated with the coordinate, will be passed to
            `~.builtin_frames.ITRS` as the obstime keyword.

        Returns
        -------
        `~.builtin_frames.ITRS`
            An `~.builtin_frames.ITRS` coordinate frame representing the coordinates.

        Notes
        -----
        The obsgeo array as accessed on a `.WCS` object is a length 6 numpy array
        where the first three elements are the coordinate in a cartesian
        representation and the second 3 are the coordinate in a spherical
        representation.

        This function prioritizes reading the cartesian coordinates, and will only
        read the spherical coordinates if the cartesian coordinates are either all
        zero or any of the cartesian coordinates are non-finite.

        In the case where both the spherical and cartesian coordinates have some
        non-finite values, the spherical coordinates will be returned with the
        non-finite values included.
        """
        if (obsgeo is None
                or len(obsgeo) != 6
                or np.all(np.array(obsgeo) == 0)
                or np.all(~np.isfinite(obsgeo))):  # NOQA
            raise ValueError(f"Cannot parse the 'obsgeo' location ({obsgeo}). "
                             "obsgeo should be a length 6 non-zero, finite numpy array")

        # If the cartesian coords are zero or have NaNs in them use the spherical ones
        if np.all(obsgeo[:3] == 0) or np.any(~np.isfinite(obsgeo[:3])):
            data = SphericalRepresentation(*(obsgeo[3:] * (u.deg, u.deg, u.m)))

        # Otherwise we assume the cartesian ones are valid
        else:
            data = CartesianRepresentation(*obsgeo[:3] * u.m)

        return ITRS(data, obstime=obstime)


def solar_wcs_frame_mapping(wcs):
    """
    This function registers the coordinate frames to their FITS-WCS coordinate
    type values in the `astropy.wcs.utils.wcs_to_celestial_frame` registry.

    Parameters
    ----------
    wcs : astropy.wcs.WCS

    Returns
    -------
    astropy.coordinates.BaseCoordinateFrame
    """
    if hasattr(wcs, "coordinate_frame"):
        return wcs.coordinate_frame

    dateobs = wcs.wcs.dateobs or None

    # Get the observer coordinate from the WCS auxiliary information
    required_attrs = {HeliographicStonyhurst: ['hgln_obs', 'hglt_obs', 'dsun_obs'],
                      HeliographicCarrington: ['crln_obs', 'hglt_obs', 'dsun_obs']}

    # Get rsun from the WCS auxiliary information
    rsun = wcs.wcs.aux.rsun_ref
    if rsun is not None:
        rsun *= u.m

    # TODO: remove these errors in sunpy 4.1
    bad_attrs = [f'.{attr}' for attr in ['rsun', 'heliographic_observer']
                 if hasattr(wcs, attr)]
    if len(bad_attrs):
        raise ValueError(f"The {' and '.join(bad_attrs)} attribute(s) on a WCS "
                         "are no longer supported.")

    observer = None
    for frame, attr_names in required_attrs.items():
        attrs = [getattr(wcs.wcs.aux, attr_name) for attr_name in attr_names]
        if all(attr is not None for attr in attrs):
            kwargs = {'obstime': dateobs}
            if rsun is not None:
                kwargs['rsun'] = rsun
            if issubclass(frame, HeliographicCarrington):
                kwargs['observer'] = 'self'
            observer = frame(attrs[0] * u.deg,
                             attrs[1] * u.deg,
                             attrs[2] * u.m,
                             **kwargs)

    # Read the observer out of obsgeo for ground based observers
    if observer is None:
        try:
            observer = obsgeo_to_frame(wcs.wcs.obsgeo, dateobs)
            observer = SkyCoord(observer, rsun=rsun)
        except ValueError as e:
            # The helper function assumes the obsgeo coords being parsed are good;
            # we are not sure, so catch the error.
            # This approach could lead to an invalid observer (i.e. one of the
            # coords being NaN), but only if the WCS has been constructed like that.
            log.debug(f"Could not parse obsgeo coordinates from WCS:\n{e}")

    # Collect all of the possible frame attributes, although some may be removed later
    frame_args = {'obstime': dateobs}
    if observer is not None:
        frame_args['observer'] = observer
    if rsun is not None:
        frame_args['rsun'] = rsun

    frame_class = _sunpy_frame_class_from_ctypes(wcs.wcs.ctype)

    if frame_class:
        if frame_class == HeliographicStonyhurst:
            frame_args.pop('observer', None)
        if frame_class == Heliocentric:
            frame_args.pop('rsun', None)

        return frame_class(**frame_args)


def _sunpy_frame_class_from_ctypes(ctypes):
    # Truncate the ctype to the first four letters
    ctypes = {c[:4] for c in ctypes}

    mapping = {
        Helioprojective: {'HPLN', 'HPLT'},
        HeliographicStonyhurst: {'HGLN', 'HGLT'},
        HeliographicCarrington: {'CRLN', 'CRLT'},
        Heliocentric: {'SOLX', 'SOLY'},
    }

    for frame_class, ctype_pair in mapping.items():
        if ctype_pair <= ctypes:
            return frame_class


def
(wcs, obs_frame):
    """
    Set (in-place) observer coordinate information on a WCS.

    Parameters
    ----------
    wcs : astropy.wcs.WCS
    obs_frame : astropy.coordinates.SkyCoord, astropy.coordinates.BaseCoordinateFrame
    """
    # Sometimes obs_frame can be a SkyCoord, so convert down to a frame
    if hasattr(obs_frame, 'frame'):
        obs_frame = obs_frame.frame

    if isinstance(obs_frame, HeliographicStonyhurst):
        wcs.wcs.aux.hgln_obs = obs_frame.lon.to_value(u.deg)
    elif isinstance(obs_frame, HeliographicCarrington):
        wcs.wcs.aux.crln_obs = obs_frame.lon.to_value(u.deg)
    else:
        raise ValueError('obs_frame must be in a Stonyhurst or Carrington frame')

    # These two keywords are the same for Carrington and Stonyhurst
    wcs.wcs.aux.hglt_obs = obs_frame.lat.to_value(u.deg)
    wcs.wcs.aux.dsun_obs = obs_frame.radius.to_value(u.m)


def solar_frame_to_wcs_mapping(frame, projection='TAN'):
    """
    For a given frame, this function returns the corresponding WCS object.
    It registers the WCS coordinate types from their associated frame in the
    `astropy.wcs.utils.celestial_frame_to_wcs` registry.

    Parameters
    ----------
    frame : astropy.coordinates.BaseCoordinateFrame
    projection : str, optional

    Returns
    -------
    astropy.wcs.WCS
    """
    wcs = WCS(naxis=2)

    if hasattr(frame, 'rsun'):
        wcs.wcs.aux.rsun_ref = frame.rsun.to_value(u.m)

    if hasattr(frame, 'observer') and frame.observer is not None:
        if isinstance(frame.observer, BaseCoordinateFrame):
            observer = frame.observer
        elif frame.observer == 'self':
            observer = frame
        _set_wcs_aux_obs_coord(wcs, observer)

    if isinstance(frame, SunPyBaseCoordinateFrame):

        if frame.obstime:
            wcs.wcs.dateobs = frame.obstime.utc.isot

        if isinstance(frame, Helioprojective):
            xcoord = 'HPLN' + '-' + projection
            ycoord = 'HPLT' + '-' + projection
            wcs.wcs.cunit = ['arcsec', 'arcsec']
        elif isinstance(frame, Heliocentric):
            xcoord = 'SOLX'
            ycoord = 'SOLY'
            wcs.wcs.cunit = ['deg', 'deg']
        elif isinstance(frame, HeliographicCarrington):
            xcoord = 'CRLN' + '-' + projection
            ycoord = 'CRLT' + '-' + projection
            wcs.wcs.cunit = ['deg', 'deg']
        elif isinstance(frame, HeliographicStonyhurst):
            xcoord = 'HGLN' + '-' + projection
            ycoord = 'HGLT' + '-' + projection
            wcs.wcs.cunit = ['deg', 'deg']

    else:
        return None

    wcs.wcs.ctype = [xcoord, ycoord]

    return wcs


astropy.wcs.utils.WCS_FRAME_MAPPINGS.append([solar_wcs_frame_mapping])
astropy.wcs.utils.FRAME_WCS_MAPPINGS.append([solar_frame_to_wcs_mapping])
_set_wcs_aux_obs_coord
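# Usage sketch (hedged): once the mappings above are registered (importing
# sunpy.coordinates does this), astropy resolves solar frames straight from a
# FITS-WCS. The header values below are invented for illustration.
if __name__ == "__main__":
    from astropy.wcs.utils import wcs_to_celestial_frame

    demo_wcs = WCS(naxis=2)
    demo_wcs.wcs.ctype = ["HPLN-TAN", "HPLT-TAN"]
    demo_wcs.wcs.cunit = ["arcsec", "arcsec"]
    demo_wcs.wcs.dateobs = "2020-01-01T00:00:00"

    frame = wcs_to_celestial_frame(demo_wcs)
    print(type(frame).__name__)  # Helioprojective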
components.py
from __future__ import annotations

from typing import Any, Dict, Optional, Text, Type
import dataclasses
import uuid

from rasa.engine.caching import Cacheable, TrainingCache
from rasa.engine.graph import ExecutionContext, GraphComponent, SchemaNode
from rasa.engine.storage.resource import Resource
from rasa.engine.storage.storage import ModelStorage
from rasa.engine.training import fingerprinting


class PrecomputedValueProvider(GraphComponent):
    """Holds the precomputed values of a `GraphNode` from a previous training.

    Pre-computed values can either be
    - values loaded from cache
    - values which were provided during the fingerprint run by input nodes
    """

    def __init__(self, output: Cacheable):
        """Initializes a `PrecomputedValueProvider`.

        Args:
            output: The precomputed output to return.
        """
        self._output = output

    @classmethod
    def create(
        cls,
        config: Dict[Text, Any],
        model_storage: ModelStorage,
        resource: Resource,
        execution_context: ExecutionContext,
    ) -> PrecomputedValueProvider:
        """Creates an instance (see parent class for full docstring)."""
        return cls(output=config["output"])

    def get_value(self) -> Cacheable:
        """Returns the precomputed output."""
    @classmethod
    def replace_schema_node(cls, node: SchemaNode, output: Any) -> None:
        """Updates a `SchemaNode` to use a `PrecomputedValueProvider`.

        This is for when we want to use the precomputed output value of a node from a
        previous training in a subsequent training. We replace the class in the `uses`
        of the node to be a `PrecomputedValueProvider` configured to return the
        precomputed value.

        Args:
            node: The node to update.
            output: Precomputed cached output that the `PrecomputedValueProvider` will
                return.
        """
        node.uses = cls
        node.config = {"output": output}
        node.fn = cls.get_value.__name__
        node.constructor_name = cls.create.__name__


@dataclasses.dataclass
class FingerprintStatus:
    """Holds the output of a `FingerprintComponent` and is used to prune the graph.

    Attributes:
        output_fingerprint: A fingerprint of the node's output value.
        is_hit: `True` if the node's fingerprint key exists in the cache, `False`
            otherwise.
    """

    output_fingerprint: Optional[Text]
    is_hit: bool

    def fingerprint(self) -> Text:
        """Returns the internal fingerprint.

        If there is no fingerprint, returns a random string that will never match.
        """
        return self.output_fingerprint or uuid.uuid4().hex


class FingerprintComponent(GraphComponent):
    """Replaces non-input nodes during a fingerprint run."""

    def __init__(
        self,
        cache: TrainingCache,
        config_of_replaced_component: Dict[Text, Any],
        class_of_replaced_component: Type,
    ) -> None:
        """Initializes a `FingerprintComponent`.

        Args:
            cache: Training cache used to determine if the run is a hit or not.
            config_of_replaced_component: Needed to generate the fingerprint key.
            class_of_replaced_component: Needed to generate the fingerprint key.
        """
        self._cache = cache
        self._config_of_replaced_component = config_of_replaced_component
        self._class_of_replaced_component = class_of_replaced_component

    @classmethod
    def create(
        cls,
        config: Dict[Text, Any],
        model_storage: ModelStorage,
        resource: Resource,
        execution_context: ExecutionContext,
    ) -> FingerprintComponent:
        """Creates a `FingerprintComponent` (see parent class for full docstring)."""
        return cls(
            cache=config["cache"],
            config_of_replaced_component=config["config_of_replaced_component"],
            class_of_replaced_component=config["graph_component_class"],
        )

    def run(self, **kwargs: Any) -> FingerprintStatus:
        """Calculates the fingerprint key to determine if cached output can be used.

        If the fingerprint key matches an entry in the cache, it means that there has
        been a previous node execution with the same component class, component config
        and input values. This means that we can potentially prune this node from the
        schema, or replace it with a cached value before the next graph run.

        Args:
            **kwargs: Inputs from all parent nodes.

        Returns:
            A `FingerprintStatus` determining if the run was a hit, and if it was a hit
            also the output fingerprint from the cache.
        """
        fingerprint_key = fingerprinting.calculate_fingerprint_key(
            graph_component_class=self._class_of_replaced_component,
            config={
                **self._class_of_replaced_component.get_default_config(),
                **self._config_of_replaced_component,
            },
            inputs=kwargs,
        )

        output_fingerprint = self._cache.get_cached_output_fingerprint(fingerprint_key)

        return FingerprintStatus(
            is_hit=output_fingerprint is not None, output_fingerprint=output_fingerprint
        )

    @classmethod
    def replace_schema_node(cls, node: SchemaNode, cache: TrainingCache) -> None:
        """Updates a `SchemaNode` to use a `FingerprintComponent`.

        This is for when we want to do a fingerprint run.
During the fingerprint run we
        replace all non-input nodes with `FingerprintComponent`s so we can determine
        whether they can be pruned or cached before the next graph run, without
        running the actual components.

        Args:
            node: The node to update.
            cache: The cache is needed to determine whether there is a cache hit for
                the fingerprint key.
        """
        graph_component_class = node.uses

        node.uses = cls
        # We update the node to be "eager" so that `FingerprintComponent.run` sees
        # ALL the inputs to the node. If it was not eager, we would miss any args used
        # by the constructor.
        node.eager = True
        node.constructor_name = cls.create.__name__
        node.fn = cls.run.__name__
        node.config = {
            "config_of_replaced_component": node.config,
            "cache": cache,
            "graph_component_class": graph_component_class,
        }
return self._output
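# Minimal sketch (hedged) of the FingerprintStatus contract relied on during pruning:
# a known fingerprint is returned verbatim, while a missing one yields a fresh random
# hex string on every call, so it can never accidentally match a cache entry.
if __name__ == "__main__":
    hit = FingerprintStatus(output_fingerprint="abc123", is_hit=True)
    miss = FingerprintStatus(output_fingerprint=None, is_hit=False)

    assert hit.fingerprint() == "abc123"
    assert miss.fingerprint() != miss.fingerprint()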
_objectives.py
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 9 18:12:29 2019

@author: Raneem
"""
from sklearn import metrics
from scipy.spatial.distance import pdist, cdist

import numpy
import sys


def getLabelsPred(startpts, points, k):
    # k is unused here but kept so every objective shares the same call signature.
    labelsPred = [-1] * len(points)

    for i in range(len(points)):
        distances = numpy.linalg.norm(points[i] - startpts, axis=1)
        labelsPred[i] = numpy.argmin(distances)

    return labelsPred


def SSE(startpts, points, k, metric):
    labelsPred = getLabelsPred(startpts, points, k)

    if numpy.unique(labelsPred).size < k:
        fitness = sys.float_info.max
    else:
        centroidsForPoints = startpts[labelsPred]
        fitness = 0
        for i in range(k):
            indexes = [n for n, x in enumerate(labelsPred) if x == i]
            fit = cdist(points[indexes], centroidsForPoints[indexes], metric) ** 2
            fit = sum(fit)[0]
            fitness += fit
    return fitness, labelsPred


def TWCV(startpts, points, k):
def SC(startpts, points, k, metric):
    labelsPred = getLabelsPred(startpts, points, k)

    if numpy.unique(labelsPred).size < k:
        fitness = sys.float_info.max
    else:
        silhouette = metrics.silhouette_score(points, labelsPred, metric=metric)
        #silhouette = (silhouette - (-1)) / (1 - (-1))
        silhouette = (silhouette + 1) / 2
        fitness = 1 - silhouette
    return fitness, labelsPred


def DB(startpts, points, k):
    labelsPred = getLabelsPred(startpts, points, k)

    if numpy.unique(labelsPred).size < k:
        fitness = sys.float_info.max
    else:
        fitness = metrics.davies_bouldin_score(points, labelsPred)
    return fitness, labelsPred


def CH(startpts, points, k):
    labelsPred = getLabelsPred(startpts, points, k)

    if numpy.unique(labelsPred).size < k:
        fitness = sys.float_info.max
    else:
        # sklearn renamed calinski_harabaz_score to calinski_harabasz_score in 0.20
        # and removed the old spelling in 0.23, so use the current name.
        ch = metrics.calinski_harabasz_score(points, labelsPred)
        fitness = 1 / ch
    return fitness, labelsPred


def delta_fast(ck, cl, distances):
    values = distances[numpy.where(ck)][:, numpy.where(cl)]
    values = values[numpy.nonzero(values)]
    return numpy.min(values)


def big_delta_fast(ci, distances):
    values = distances[numpy.where(ci)][:, numpy.where(ci)]
    #values = values[numpy.nonzero(values)]
    return numpy.max(values)


def dunn_fast(points, labels, metric):
    v = pdist(points, metric)
    size_X = len(points)
    X = numpy.zeros((size_X, size_X))
    X[numpy.triu_indices(X.shape[0], k=1)] = v
    distances = X + X.T
    ks = numpy.sort(numpy.unique(labels))

    deltas = numpy.ones([len(ks), len(ks)]) * 1000000
    big_deltas = numpy.zeros([len(ks), 1])

    l_range = list(range(0, len(ks)))

    for k in l_range:
        for l in (l_range[0:k] + l_range[k + 1:]):
            deltas[k, l] = delta_fast((labels == ks[k]), (labels == ks[l]), distances)

        big_deltas[k] = big_delta_fast((labels == ks[k]), distances)

    di = numpy.min(deltas) / numpy.max(big_deltas)
    return di


def DI(startpts, points, k, metric):
    labelsPred = getLabelsPred(startpts, points, k)

    if numpy.unique(labelsPred).size < k:
        fitness = sys.float_info.max
    else:
        dunn = dunn_fast(points, labelsPred, metric)
        if dunn < 0:
            dunn = 0
        fitness = 1 - dunn
    return fitness, labelsPred


def getFunctionDetails(a):
    # [name, lb, ub]
    param = {
        0: ["SSE", 0, 1],
        1: ["TWCV", 0, 1],
        2: ["SC", 0, 1],
        3: ["DB", 0, 1],
        #4: ["CH", 0, 1],
        4: ["DI", 0, 1],
    }
    return param.get(a, "nothing")
labelsPred = getLabelsPred(startpts, points, k) if numpy.unique(labelsPred).size < k: fitness = sys.float_info.max else: sumAllFeatures = sum(sum(numpy.power(points,2))) sumAllPairPointsCluster = 0 for clusterId in range(k): indices = numpy.where(numpy.array(labelsPred) == clusterId)[0] pointsInCluster = points[numpy.array(indices)] sumPairPointsCluster = sum(pointsInCluster) sumPairPointsCluster = numpy.power(sumPairPointsCluster,2) sumPairPointsCluster = sum(sumPairPointsCluster) sumPairPointsCluster = sumPairPointsCluster/len(pointsInCluster) sumAllPairPointsCluster += sumPairPointsCluster fitness = (sumAllFeatures - sumAllPairPointsCluster) return fitness, labelsPred
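# Usage sketch (hedged): scoring hand-picked centroids on toy 2-D data with the
# SSE objective; the points and starting centroids are invented for illustration.
if __name__ == "__main__":
    points = numpy.array([[0.0, 0.0], [0.1, 0.2], [5.0, 5.0], [5.1, 4.9]])
    startpts = numpy.array([[0.0, 0.1], [5.0, 5.0]])

    fitness, labels = SSE(startpts, points, 2, "euclidean")
    print(getFunctionDetails(0), fitness, labels)  # ['SSE', 0, 1] <sse> [0, 0, 1, 1]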
ex3.py
import numpy as np import matplotlib.pyplot as plt from sal_timer import timer def
(): # ... data = { 'a': np.arange(50), 'c': np.random.randint(0, 50, 50), 'd': np.random.randn(50) } data['b'] = data['a'] + 10 * np.random.randn(50) data['d'] = np.abs(data['d']) * 100 # ... # x : x # y : y # c : color # s : size plt.scatter(x='a', y='b', c='c', s='d', data=data) # ... plt.xlabel('entry a') plt.ylabel('entry b') plt.show() @timer def main(): plot_1() if __name__ == '__main__': print('========================================== START ==========================================') #... main() print('========================================== END ============================================')
plot_1
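# Related sketch (hedged): the same data-keyword indirection used by plot_1() works
# for other pyplot functions, e.g. plt.plot; column names are looked up in the
# mapping passed via data=.
def plot_2():
    demo = {'x': np.arange(10), 'y': np.arange(10) ** 2}
    plt.plot('x', 'y', 'o-', data=demo)
    plt.xlabel('x')
    plt.ylabel('y')
    plt.show()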
dataset_item.py
"""This module implements the dataset item entity""" # Copyright (C) 2021-2022 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # import abc import copy import itertools import logging from threading import Lock from typing import List, Optional, Sequence import numpy as np from ote_sdk.entities.annotation import Annotation, AnnotationSceneEntity from ote_sdk.entities.label import LabelEntity from ote_sdk.entities.media import IMedia2DEntity from ote_sdk.entities.metadata import IMetadata, MetadataItemEntity from ote_sdk.entities.model import ModelEntity from ote_sdk.entities.scored_label import ScoredLabel from ote_sdk.entities.shapes.rectangle import Rectangle from ote_sdk.entities.subset import Subset from ote_sdk.utils.shape_factory import ShapeFactory logger = logging.getLogger(__name__) class DatasetItemEntity(metaclass=abc.ABCMeta): """ DatasetItemEntity represents an item in the DatasetEntity. It holds a media item, annotation and an ROI. The ROI determines the region of interest for the dataset item, and is described by a shape entity. Dataset items hold five fundamental properties: - A 2d media entity (e.g. Image) - A 2d annotation entity for the full resolution media entity - An ROI, describing the region of interest. - The subset it belongs to - Metadata for the media entity (e.g. saliency map or active score) .. rubric:: Getting data from dataset item The first step is to fetch the input data for the network. >>> dataset_item = DatasetItemEntity() >>> media_numpy = dataset_item.numpy # RGB media data (Height, Width, Channels) This returns the numpy data for the assigned ROI. But it is possible to extract any arbitrary region. >>> from ote_sdk.entities.shapes.rectangle import Rectangle >>> top_left_quart_roi = Annotation(Rectangle(x1=0.0, y1=0.0, x2=0.5, y2=0.5), labels=[]) >>> top_left_quart_numpy = dataset_item.roi_numpy(roi=top_left_quart_roi) Get the subset of labels for the item ROI: >>> labels = dataset_item.get_roi_labels(labels=...) Get the annotations __visible__ in the ROI: >>> dataset_item.get_annotations() .. rubric:: Adding output data to dataset item It is possible to add shapes or just labels for the ROI. Add shapes to dataset item: >>> box = Rectangle(x1=0.2, y1=0.3, x2=0.6, y2=0.5) >>> dataset_item.append_annotations(annotations=[Annotation(box, labels=[...])]) Add labels to ROI: >>> dataset_item.append_labels(labels=[...]) :param media: Media item :param annotation_scene: Annotation scene :param roi: Region Of Interest :param metadata: Metadata attached to dataset item :param subset: `Subset` for item. E.g. 
`Subset.VALIDATION`
    """

    # pylint: disable=too-many-arguments
    def __init__(
        self,
        media: IMedia2DEntity,
        annotation_scene: AnnotationSceneEntity,
        roi: Optional[Annotation] = None,
        metadata: Optional[Sequence[MetadataItemEntity]] = None,
        subset: Subset = Subset.NONE,
    ):
        self.__media: IMedia2DEntity = media
        self.__annotation_scene: AnnotationSceneEntity = annotation_scene
        self.__subset: Subset = subset
        self.__roi_lock = Lock()

        # set ROI
        if roi is None:
            for annotation in annotation_scene.annotations:
                # if there is a full box in annotation.shapes, set it as ROI
                if Rectangle.is_full_box(annotation.shape):
                    roi = annotation
                    break
        self.__roi = roi

        self.__metadata: List[MetadataItemEntity] = []
        if metadata is not None:
            self.__metadata = list(metadata)

    @property
    def metadata(self) -> Sequence[MetadataItemEntity]:
        """Provides access to metadata."""
        return self.__metadata

    def __repr__(self):
        return (
            f"{self.__class__.__name__}("
            f"media={self.media}, "
            f"annotation_scene={self.annotation_scene}, "
            f"roi={self.roi}, "
            f"subset={self.subset})"
        )

    @property
    def roi(self) -> Annotation:
        """Region Of Interest."""
        with self.__roi_lock:
            if self.__roi is None:
                requested_roi = Annotation(Rectangle.generate_full_box(), labels=[])
                self.__roi = requested_roi
            else:
                requested_roi = self.__roi
            return requested_roi

    @roi.setter
    def roi(self, roi: Optional[Annotation]):
        with self.__roi_lock:
            self.__roi = roi

    @property
    def subset(self) -> Subset:
        """
        Returns the subset that the IDatasetItem belongs to. e.g. Subset.TRAINING.
        """
        return self.__subset

    @subset.setter
    def subset(self, value: Subset):
        self.__subset = value

    @property
    def media(self) -> IMedia2DEntity:
        """Media."""
        return self.__media

    def roi_numpy(self, roi: Optional[Annotation] = None) -> np.ndarray:
        """
        Gives the numpy data for the media, given an ROI.

        This function makes it possible to take a crop of any arbitrary region of the
        media in the Dataset entity. If the ROI is not given, the ROI assigned to the
        DatasetItem will be used as default.

        :param roi: ROI annotation. Its shape will be converted, if needed, to extract the ROI numpy.
        :return: Numpy array with media data
        """
        if roi is None:
            roi = self.roi

        if roi is not None:
            roi.shape = ShapeFactory.shape_as_rectangle(roi.shape)

        return self.media.roi_numpy(roi=roi)

    @property
    def numpy(self) -> np.ndarray:
        """
        Returns the numpy data for the media, taking ROI into account.

        :return: Numpy array. RGB array of shape (Height, Width, Channels)
        """
        return self.roi_numpy()

    @property
    def width(self) -> int:
        """
        The width of the dataset item, taking into account the ROI.
        """
        roi_shape_as_box = ShapeFactory.shape_as_rectangle(self.roi.shape)
        roi_shape_as_box = roi_shape_as_box.clip_to_visible_region()
        width = self.media.width

        # Note that we cannot directly use roi_shape_as_box.width due to the rounding
        # because round(x2 - x1) is not always equal to round(x2) - round(x1)
        x1 = int(round(roi_shape_as_box.x1 * width))
        x2 = int(round(roi_shape_as_box.x2 * width))

        return x2 - x1

    @property
    def height(self) -> int:
        """
        The height of the dataset item, taking into account the ROI.
""" roi_shape_as_box = ShapeFactory.shape_as_rectangle(self.roi.shape) roi_shape_as_box = roi_shape_as_box.clip_to_visible_region() height = self.media.height # Note that we cannot directly use roi_shape_as_box.height due to the rounding # because round(y2 - y1) is not always equal to round(y2) - round(y1) y1 = int(round(roi_shape_as_box.y1 * height)) y2 = int(round(roi_shape_as_box.y2 * height)) return y2 - y1 @property def annotation_scene(self) -> AnnotationSceneEntity: """Access to annotation scene.""" return self.__annotation_scene @annotation_scene.setter def annotation_scene(self, value: AnnotationSceneEntity): self.__annotation_scene = value def get_annotations( self, labels: Optional[List[LabelEntity]] = None, include_empty: bool = False, ) -> List[Annotation]: """ Returns a list of annotations that exist in the dataset item (wrt. ROI). This is done by checking that the center of the annotation is located in the ROI. :param labels: Subset of input labels to filter with; if ``None``, all the shapes within the ROI are returned :param include_empty: if True, returns both empty and non-empty labels :return: The intersection of the input label set and those present within the ROI """ is_full_box = Rectangle.is_full_box(self.roi.shape) annotations = [] if is_full_box and labels is None and not include_empty: # Fast path for the case where we do not need to change the shapes # todo: this line is incorrect. CVS-75919 annotations = self.annotation_scene.annotations else: # Todo: improve speed. This is O(n) for n shapes. roi_as_box = ShapeFactory.shape_as_rectangle(self.roi.shape) labels_set = {label.name for label in labels} if labels is not None else {} for annotation in self.annotation_scene.annotations: if not is_full_box and not self.roi.shape.contains_center( annotation.shape ): continue shape_labels = annotation.get_labels(include_empty) if labels is not None: shape_labels = [ label for label in shape_labels if label.name in labels_set ] if len(shape_labels) == 0: continue if not is_full_box: # Create a denormalized copy of the shape. shape = annotation.shape.denormalize_wrt_roi_shape(roi_as_box) else: # Also create a copy of the shape, so that we can safely modify the labels # without tampering with the original shape. shape = copy.deepcopy(annotation.shape) annotations.append(Annotation(shape=shape, labels=shape_labels)) return annotations def append_annotations(self, annotations: Sequence[Annotation]): """ Adds a list of shapes to the annotation """ roi_as_box = ShapeFactory.shape_as_rectangle(self.roi.shape) validated_annotations = [ Annotation( shape=annotation.shape.normalize_wrt_roi_shape(roi_as_box), labels=annotation.get_labels(), ) for annotation in annotations if ShapeFactory().shape_produces_valid_crop( shape=annotation.shape, media_width=self.media.width, media_height=self.media.height, ) ] n_invalid_shapes = len(annotations) - len(validated_annotations) if n_invalid_shapes > 0: logger.info( "%d shapes will not be added to the dataset item as they " "would produce invalid crops (this is expected for some tasks, " "such as segmentation).", n_invalid_shapes, ) self.annotation_scene.append_annotations(validated_annotations) def
(
        self, labels: Optional[List[LabelEntity]] = None, include_empty: bool = False
    ) -> List[LabelEntity]:
        """
        Return the subset of the input labels which exist in the dataset item (wrt. ROI).

        :param labels: Subset of input labels to filter with; if ``None``, all the labels within
            the ROI are returned
        :param include_empty: if True, returns both empty and non-empty labels
        :return: The intersection of the input label set and those present within the ROI
        """
        filtered_labels = set()
        for label in self.roi.get_labels(include_empty):
            if labels is None or label.get_label() in labels:
                filtered_labels.add(label.get_label())
        return sorted(list(filtered_labels), key=lambda x: x.name)

    def get_shapes_labels(
        self, labels: Optional[List[LabelEntity]] = None, include_empty: bool = False
    ) -> List[LabelEntity]:
        """
        Get the labels of the shapes present in this dataset item.

        If a label list is supplied, only labels present within that list are returned.
        If include_empty is True, present empty labels are returned as well.

        :param labels: if supplied, only labels present in this list are returned
        :param include_empty: if True, returns both empty and non-empty labels
        :return: a list of labels from the shapes within the ROI of this dataset item
        """
        annotations = self.get_annotations()
        scored_label_set = set(
            itertools.chain(
                *[annotation.get_labels(include_empty) for annotation in annotations]
            )
        )
        label_set = {scored_label.get_label() for scored_label in scored_label_set}

        if labels is None:
            return list(label_set)
        return [label for label in label_set if label in labels]

    def append_labels(self, labels: List[ScoredLabel]):
        """
        Appends labels to the DatasetItem and adds them to the annotation label as well
        if they are not yet there.

        :param labels: list of labels to be appended
        """
        if len(labels) == 0:
            return

        roi_annotation = None
        for annotation in self.annotation_scene.annotations:
            if annotation == self.roi:
                roi_annotation = annotation
                break

        if roi_annotation is None:  # no annotation found with shape
            roi_annotation = self.roi
            self.annotation_scene.append_annotation(roi_annotation)

        for label in labels:
            if label not in self.roi.get_labels(include_empty=True):
                self.roi.append_label(label)
            if label not in roi_annotation.get_labels(include_empty=True):
                roi_annotation.append_label(label)

    def __eq__(self, other):
        if isinstance(other, DatasetItemEntity):
            return (
                self.media == other.media
                and self.annotation_scene == other.annotation_scene
                and self.roi == other.roi
                and self.subset == other.subset
            )
        return False

    def __deepcopy__(self, memo):
        """
        When we deepcopy this object, be sure not to deepcopy the lock, as that is
        not possible; make a new lock instead.
        """
        # Call ROI getter to ensure original object has an ROI.
        _ = self.roi

        clone = copy.copy(self)

        for name, value in vars(self).items():
            if "__roi_lock" in name:
                setattr(clone, name, Lock())
            else:
                setattr(clone, name, copy.deepcopy(value, memo))
        return clone

    def append_metadata_item(
        self, data: IMetadata, model: Optional[ModelEntity] = None
    ):
        """
        Appends metadata produced by some model to the dataset item.

        ..
rubric:: Adding a visualization heatmap (ResultMediaEntity) to the DatasetItemEntity

        >>> from ote_sdk.entities.image import Image
        >>> from ote_sdk.entities.result_media import ResultMediaEntity
        >>> media = Image(file_path='image.jpeg')
        >>> annotation = NullAnnotationSceneEntity()
        >>> dataset_item = DatasetItem(media=media, annotation_scene=annotation)
        >>> data = np.ones((120, 120, 3)).astype(np.uint8) * 255  # Saliency numpy
        >>> result_media = ResultMediaEntity(name="Gradcam++",
        ...                                  type="Gradcam++",
        ...                                  annotation_scene=annotation,
        ...                                  numpy=data)
        >>> dataset_item.append_metadata_item(result_media)

        .. rubric:: Representation vector for active learning

        >>> from ote_sdk.entities.tensor import TensorEntity
        >>> tensor = TensorEntity(name="representation_vector", numpy=data)
        >>> dataset_item.append_metadata_item(data=tensor, model=model)

        :param data: any object of a class inherited from IMetadata (e.g., FloatMetadata, Tensor)
        :param model: model that was used to generate the metadata
        """
        self.__metadata.append(MetadataItemEntity(data=data, model=model))

    def get_metadata_by_name_and_model(
        self, name: str, model: Optional[ModelEntity]
    ) -> Sequence[MetadataItemEntity]:
        """
        Returns all metadata items with name `name` that were generated by `model`.

        :param name: the name of the metadata
        :param model: the model which was used to generate the metadata
        :return: a list of matching metadata items
        """
        return [
            meta
            for meta in self.metadata
            if meta.data.name == name and meta.model == model
        ]
get_roi_labels
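# Usage sketch (hedged): assembling a DatasetItemEntity from an in-memory image and
# one full-box annotation, then reading the labels back. The Image, LabelEntity,
# ScoredLabel and AnnotationSceneEntity constructor signatures below follow the
# ote_sdk imports above but are assumptions, not verified against a specific version.
if __name__ == "__main__":
    from ote_sdk.entities.annotation import AnnotationSceneKind
    from ote_sdk.entities.image import Image
    from ote_sdk.entities.label import Domain

    cat = LabelEntity(name="cat", domain=Domain.CLASSIFICATION)
    full_box = Annotation(Rectangle.generate_full_box(), labels=[ScoredLabel(cat)])
    scene = AnnotationSceneEntity(
        annotations=[full_box], kind=AnnotationSceneKind.ANNOTATION
    )

    item = DatasetItemEntity(
        media=Image(data=np.zeros((8, 8, 3), dtype=np.uint8)),
        annotation_scene=scene,
    )
    print(item.get_shapes_labels())  # -> [LabelEntity(name="cat", ...)]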