hexsha
stringlengths 40
40
| size
int64 4
1.05M
| content
stringlengths 4
1.05M
| avg_line_length
float64 1.33
100
| max_line_length
int64 1
1k
| alphanum_fraction
float64 0.25
1
|
---|---|---|---|---|---|
23eb775bbb53490bd7c2b5b367172e420d906ff4
| 875 |
use std::cell::RefCell;
use std::rc::Rc;
impl Solution {
    /// Mirrors a binary tree in place: recursively swaps the left and right
    /// subtree of every node, returning the root of the inverted tree
    /// (`None` for an empty tree).
    pub fn invert_tree(root: Option<Rc<RefCell<TreeNode>>>) -> Option<Rc<RefCell<TreeNode>>> {
        root.map(|node| {
            // Detach both children inside a short scope so the mutable
            // borrow of the RefCell ends before we recurse.
            let (old_left, old_right) = {
                let mut n = node.borrow_mut();
                (n.left.take(), n.right.take())
            };
            // Invert each subtree, then reattach them swapped.
            let new_left = Solution::invert_tree(old_right);
            let new_right = Solution::invert_tree(old_left);
            {
                let mut n = node.borrow_mut();
                n.left = new_left;
                n.right = new_right;
            }
            node
        })
    }
}
// Zero-sized marker type: LeetCode-style code hangs its associated
// functions off this struct rather than using free functions.
pub struct Solution;
// Definition for a binary tree node.
#[derive(Debug, PartialEq, Eq)]
pub struct TreeNode {
    pub val: i32,
    pub left: Option<Rc<RefCell<TreeNode>>>,
    pub right: Option<Rc<RefCell<TreeNode>>>,
}

impl TreeNode {
    /// Creates a leaf node holding `val`, with no children attached.
    #[inline]
    pub fn new(val: i32) -> Self {
        Self { val, left: None, right: None }
    }
}
| 22.435897 | 92 | 0.612571 |
d586930d2b54804c26682ee74395266851f1d86f
| 4,900 |
use async_trait::async_trait;
use crate::error;
use crate::scalar::value::number::*;
use crate::transaction::{Txn, TxnId};
use crate::{TCBoxTryFuture, TCResult};
mod einsum;
mod handlers;
mod stream;
mod transform;
pub mod bounds;
pub mod class;
pub mod dense;
pub mod sparse;
pub use bounds::*;
pub use class::{Tensor, TensorInstance, TensorType};
pub use dense::{from_sparse, Array, DenseTensor};
pub use einsum::einsum;
pub use sparse::{from_dense, SparseTensor};
// An n-dimensional coordinate into a tensor: one index per axis.
pub type Coord = Vec<u64>;
// Error message used when a write is attempted through a derived tensor
// view that cannot be mapped back onto its source.
pub const ERR_NONBIJECTIVE_WRITE: &str = "Cannot write to a derived Tensor which is not a \
bijection of its source. Consider copying first, or writing directly to the source Tensor.";
/// Conversion of a concrete tensor implementation into the general
/// [`Tensor`] view type.
pub trait IntoView {
fn into_view(self) -> Tensor;
}
/// Basic metadata shared by every tensor: element type, rank, shape, and size.
pub trait TensorAccess: Send {
/// The data type of this tensor's elements.
fn dtype(&self) -> NumberType;
/// The number of dimensions (rank) of this tensor.
fn ndim(&self) -> usize;
/// The extent of each dimension.
fn shape(&'_ self) -> &'_ Shape;
/// The total number of elements in this tensor.
fn size(&self) -> u64;
}
/// Boolean combinators (`and`/`or`/`xor`) between `self` and another
/// tensor type `O`.
#[async_trait]
pub trait TensorBoolean<O>: TensorAccess + Sized {
/// The tensor type produced by combining `self` with an `O`.
type Combine: TensorInstance;
fn and(&self, other: &O) -> TCResult<Self::Combine>;
fn or(&self, other: &O) -> TCResult<Self::Combine>;
fn xor(&self, other: &O) -> TCResult<Self::Combine>;
}
/// Unary tensor operators, plus whole-tensor `all`/`any` reductions
/// (which read within a transaction and are therefore async).
#[async_trait]
pub trait TensorUnary: TensorAccess + Sized {
/// The tensor type produced by a unary operator.
type Unary: TensorInstance;
fn abs(&self) -> TCResult<Self::Unary>;
// NOTE(review): semantics assumed from the names — `all`/`any` presumably
// test element truthiness across the whole tensor; confirm against impls.
async fn all(&self, txn: &Txn) -> TCResult<bool>;
async fn any(&self, txn: &Txn) -> TCResult<bool>;
fn not(&self) -> TCResult<Self::Unary>;
}
/// Comparison operators against another tensor type `O`.
///
/// Note the asymmetry visible in the signatures: `eq`/`gte`/`lte` are async,
/// take a transaction, and produce `Self::Dense`, while `gt`/`lt`/`ne` are
/// synchronous and produce `Self::Compare`.
#[async_trait]
pub trait TensorCompare<O>: TensorAccess + Sized {
/// Result type of the synchronous comparisons (`gt`/`lt`/`ne`).
type Compare: TensorInstance;
/// Result type of the transactional comparisons (`eq`/`gte`/`lte`).
type Dense: TensorInstance;
async fn eq(&self, other: &O, txn: &Txn) -> TCResult<Self::Dense>;
fn gt(&self, other: &O) -> TCResult<Self::Compare>;
async fn gte(&self, other: &O, txn: &Txn) -> TCResult<Self::Dense>;
fn lt(&self, other: &O) -> TCResult<Self::Compare>;
async fn lte(&self, other: &O, txn: &Txn) -> TCResult<Self::Dense>;
fn ne(&self, other: &O) -> TCResult<Self::Compare>;
}
/// Scalar-level I/O on a tensor: read one element, or write a single value
/// at a coordinate or across a bounded region.
#[async_trait]
pub trait TensorIO: TensorAccess + Sized {
/// Reads the element at `coord` within the given transaction.
async fn read_value(&self, txn: &Txn, coord: Coord) -> TCResult<Number>;
/// Writes `value` to every element within `bounds`.
async fn write_value(
&self,
txn_id: TxnId,
bounds: bounds::Bounds,
value: Number,
) -> TCResult<()>;
/// Writes `value` to the single element at `coord`.
async fn write_value_at(&self, txn_id: TxnId, coord: Coord, value: Number) -> TCResult<()>;
}
/// Tensor-valued writes: overlay another tensor `O` onto `self`.
#[async_trait]
pub trait TensorDualIO<O>: TensorAccess + Sized {
/// Applies `value` as a mask over `self` within the given transaction.
async fn mask(&self, txn: &Txn, value: O) -> TCResult<()>;
/// Writes the tensor `value` into the region of `self` given by `bounds`.
async fn write(&self, txn: &Txn, bounds: bounds::Bounds, value: O) -> TCResult<()>;
}
/// Arithmetic combinators (`add`/`multiply`) against another tensor type `O`.
pub trait TensorMath<O>: TensorAccess + Sized {
/// The tensor type produced by combining `self` with an `O`.
type Combine: TensorInstance;
fn add(&self, other: &O) -> TCResult<Self::Combine>;
fn multiply(&self, other: &O) -> TCResult<Self::Combine>;
}
/// Reductions: per-axis (`sum`/`product`, yielding a lower-rank tensor) and
/// whole-tensor (`*_all`, yielding a single `Number` as a boxed future).
pub trait TensorReduce: TensorAccess + Sized {
/// The tensor type produced by a per-axis reduction.
type Reduce: TensorInstance;
fn product(&self, axis: usize) -> TCResult<Self::Reduce>;
fn product_all(&self, txn: Txn) -> TCBoxTryFuture<Number>;
fn sum(&self, axis: usize) -> TCResult<Self::Reduce>;
fn sum_all(&self, txn: Txn) -> TCBoxTryFuture<Number>;
}
/// Shape- and type-level transforms, each with its own output tensor type so
/// implementations can return lazy views rather than materialized copies.
pub trait TensorTransform: TensorAccess + Sized {
type Cast: TensorInstance;
type Broadcast: TensorInstance;
type Expand: TensorInstance;
type Slice: TensorInstance;
type Transpose: TensorInstance;
/// Casts the elements of this tensor to `dtype`.
fn as_type(&self, dtype: NumberType) -> TCResult<Self::Cast>;
/// Broadcasts this tensor to the given (compatible) `shape`.
fn broadcast(&self, shape: bounds::Shape) -> TCResult<Self::Broadcast>;
/// Inserts a new dimension of size 1 at `axis`.
fn expand_dims(&self, axis: usize) -> TCResult<Self::Expand>;
/// Returns the sub-tensor within `bounds`.
fn slice(&self, bounds: bounds::Bounds) -> TCResult<Self::Slice>;
/// Permutes the axes; `None` presumably means reverse axis order — confirm with impls.
fn transpose(&self, permutation: Option<Vec<usize>>) -> TCResult<Self::Transpose>;
}
/// Computes the common broadcast shape of `left` and `right` using
/// NumPy-style right-aligned rules, and returns both tensors broadcast
/// to that shape.
fn broadcast<L: Clone + TensorTransform, R: Clone + TensorTransform>(
    left: &L,
    right: &R,
) -> TCResult<(
    <L as TensorTransform>::Broadcast,
    <R as TensorTransform>::Broadcast,
)> {
    let mut left_shape = left.shape().to_vec();
    let mut right_shape = right.shape().to_vec();

    // Left-pad the lower-rank shape with 1s until both have the same length.
    while left_shape.len() < right_shape.len() {
        left_shape.insert(0, 1);
    }
    while right_shape.len() < left_shape.len() {
        right_shape.insert(0, 1);
    }

    // Two dimensions are compatible when they are equal or either is 1;
    // the broadcast dimension is whichever of the pair is larger.
    let mut shape = Vec::with_capacity(left_shape.len());
    for (&l, &r) in left_shape.iter().zip(right_shape.iter()) {
        if l == r || l == 1 {
            shape.push(r);
        } else if r == 1 {
            shape.push(l);
        } else {
            return Err(error::bad_request(
                "Cannot broadcast dimension",
                format!("{} into {}", l, r),
            ));
        }
    }

    let broadcast_left = left.broadcast(shape.clone().into())?;
    let broadcast_right = right.broadcast(shape.into())?;
    Ok((broadcast_left, broadcast_right))
}
| 26.486486 | 95 | 0.613061 |
d9278f688ee914f5d6c285a8f32cb3af4d45f1ac
| 2,383 |
use log::*;
use serde::{Deserialize, Serialize};
use simplelog::{Config as LogConfig, LevelFilter, TermLogger};
use dono::*;
use error::Error;
use server::HttpServer;
/// Reads the `DONO_LOG` environment variable (case-insensitively) and maps
/// it to a `LevelFilter`. Unset or unrecognized values — including "INFO"
/// itself — default to `Info`.
fn get_log_level() -> LevelFilter {
    match std::env::var("DONO_LOG")
        .map(|s| s.to_ascii_uppercase())
        .unwrap_or_default()
        .as_str()
    {
        // Allow logging to be disabled entirely; previously "OFF" silently
        // fell through to the Info default.
        "OFF" => LevelFilter::Off,
        "TRACE" => LevelFilter::Trace,
        "DEBUG" => LevelFilter::Debug,
        "WARN" => LevelFilter::Warn,
        "ERROR" => LevelFilter::Error,
        // Default. The original `"INFO" | _` arm made the "INFO" sub-pattern
        // unreachable (covered by `_`); a bare catch-all is equivalent.
        _ => LevelFilter::Info,
    }
}
// Application entry point: initialize logging, locate/create the project
// directories, load (or scaffold) config.toml, initialize the database,
// then start the HTTP server. Exits with status 1 on any setup failure.
fn main() {
// Logging must come up first so later failures are reported.
TermLogger::init(
get_log_level(),
LogConfig::default(), // default terminal logger configuration
)
.expect("initialize logger");
// Platform-appropriate data/config dirs for the "dono_server" project.
let dir = directories::ProjectDirs::from("com.github", "museun", "dono_server").unwrap();
std::fs::create_dir_all(dir.data_dir()).expect("must be able to create project dirs");
std::fs::create_dir_all(dir.config_dir()).expect("must be able to create project dirs");
// Config schema is local to main: only the server address and port.
#[derive(Deserialize, Serialize, Clone, Debug)]
struct Config {
pub address: String,
pub port: u16,
}
let file = dir.config_dir().join("config.toml");
// Read + parse failures are both folded into None: in either case a
// default config is written out and the process exits so the user can
// edit it and re-run.
let config: Config = match std::fs::read(&file)
.ok()
.and_then(|data| toml::from_slice(&data).ok())
{
Some(config) => config,
None => {
warn!("creating default config.toml at {}", file.to_str().unwrap())
;
warn!("edit and re-run");
let data = toml::to_string_pretty(&Config {
address: "localhost".into(),
port: 50006,
})
.expect("valid config");
std::fs::write(file, &data).expect("write config");
std::process::exit(1)
}
};
// Point the (global) database path at the data dir before first use.
database::DB_PATH
.set(dir.data_dir().join("videos.db"))
.expect("must be able to set DB path");
// Apply the bundled schema; idempotence depends on the SQL itself
// (NOTE(review): presumably CREATE TABLE IF NOT EXISTS — confirm).
if let Err(err) = database::get_connection()
.execute_batch(include_str!("../../sql/schema.sql"))
.map_err(Error::Sql)
{
error!("cannot create tables from schema: {}", err);
std::process::exit(1)
}
let server = match HttpServer::new((config.address.as_str(), config.port)) {
Ok(server) => server,
Err(err) => {
error!("cannot start http server: {}", err);
std::process::exit(1)
}
};
// Blocks for the lifetime of the server.
server.run()
}
| 28.710843 | 93 | 0.554763 |
872448a85f319378b90b3a12220e0daac1731b40
| 170,307 |
#![doc = "generated by AutoRust 0.1.0"]
#![allow(non_camel_case_types)]
#![allow(unused_imports)]
use serde::{Deserialize, Serialize};
// Error and region model contracts (AutoRust-generated, per the file header);
// fields map 1:1 onto the REST wire format via serde attributes.
/// A single field-level validation error.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorFieldContract {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub code: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub message: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub target: Option<String>,
}
/// Top-level error envelope returned by the service.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorResponse {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub error: Option<ErrorResponseBody>,
}
/// Body of an error response: overall code/message plus per-field details.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ErrorResponseBody {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub code: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub message: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub details: Vec<ErrorFieldContract>,
}
/// A deployment region.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RegionContract {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "isMasterRegion", default, skip_serializing_if = "Option::is_none")]
pub is_master_region: Option<bool>,
#[serde(rename = "isDeleted", default, skip_serializing_if = "Option::is_none")]
pub is_deleted: Option<bool>,
}
/// Paged list of regions (`nextLink` points at the following page, if any).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RegionListResult {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<RegionContract>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub count: Option<i64>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
// Tenant access-information and tenant-settings contracts.
/// Paged list of tenant access records.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AccessInformationCollection {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<AccessInformationContract>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub count: Option<i64>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
/// A tenant access record as an ARM resource (id/name/type flattened in).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AccessInformationContract {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<AccessInformationContractProperties>,
}
/// Properties of a tenant access record (no key material).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AccessInformationContractProperties {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(rename = "principalId", default, skip_serializing_if = "Option::is_none")]
pub principal_id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub enabled: Option<bool>,
}
/// Tenant access record including primary/secondary keys (secret variant).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AccessInformationSecretsContract {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(rename = "principalId", default, skip_serializing_if = "Option::is_none")]
pub principal_id: Option<String>,
#[serde(rename = "primaryKey", default, skip_serializing_if = "Option::is_none")]
pub primary_key: Option<String>,
#[serde(rename = "secondaryKey", default, skip_serializing_if = "Option::is_none")]
pub secondary_key: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub enabled: Option<bool>,
}
/// Request body for creating a tenant access record.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AccessInformationCreateParameters {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<AccessInformationCreateParameterProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AccessInformationCreateParameterProperties {
#[serde(rename = "principalId", default, skip_serializing_if = "Option::is_none")]
pub principal_id: Option<String>,
#[serde(rename = "primaryKey", default, skip_serializing_if = "Option::is_none")]
pub primary_key: Option<String>,
#[serde(rename = "secondaryKey", default, skip_serializing_if = "Option::is_none")]
pub secondary_key: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub enabled: Option<bool>,
}
/// Request body for updating a tenant access record (only `enabled`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AccessInformationUpdateParameters {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<AccessInformationUpdateParameterProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AccessInformationUpdateParameterProperties {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub enabled: Option<bool>,
}
/// Paged list of tenant settings.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TenantSettingsCollection {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<TenantSettingsContract>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
/// Tenant settings as an ARM resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TenantSettingsContract {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<TenantSettingsContractProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TenantSettingsContractProperties {
// Free-form JSON: the settings schema is not modeled by the generator.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub settings: Option<serde_json::Value>,
}
// API entity contracts: listing, the entity itself, and create/update bodies.
/// Paged list of APIs.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiCollection {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<ApiContract>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub count: Option<i64>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
/// An API entity as an ARM resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiContract {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<ApiContractProperties>,
}
/// Full API properties; `path` is the only required field here.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiContractProperties {
#[serde(flatten)]
pub api_entity_base_contract: ApiEntityBaseContract,
#[serde(rename = "sourceApiId", default, skip_serializing_if = "Option::is_none")]
pub source_api_id: Option<String>,
#[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
pub display_name: Option<String>,
#[serde(rename = "serviceUrl", default, skip_serializing_if = "Option::is_none")]
pub service_url: Option<String>,
pub path: String,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub protocols: Vec<String>,
#[serde(rename = "apiVersionSet", default, skip_serializing_if = "Option::is_none")]
pub api_version_set: Option<ApiVersionSetContractDetails>,
}
/// Request body for creating or updating an API.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiCreateOrUpdateParameter {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<ApiCreateOrUpdateProperties>,
}
/// Create/update properties: the full contract plus an optional inline
/// definition (`value` in the given `format`, e.g. an OpenAPI document).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiCreateOrUpdateProperties {
#[serde(flatten)]
pub api_contract_properties: ApiContractProperties,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub value: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub format: Option<api_create_or_update_properties::Format>,
#[serde(rename = "wsdlSelector", default, skip_serializing_if = "Option::is_none")]
pub wsdl_selector: Option<api_create_or_update_properties::WsdlSelector>,
#[serde(rename = "apiType", default, skip_serializing_if = "Option::is_none")]
pub api_type: Option<api_create_or_update_properties::ApiType>,
}
// Enums scoped to ApiCreateOrUpdateProperties; serde renames carry the
// exact wire values.
pub mod api_create_or_update_properties {
use super::*;
/// Format of the inline API definition in `value`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Format {
#[serde(rename = "wadl-xml")]
WadlXml,
#[serde(rename = "wadl-link-json")]
WadlLinkJson,
#[serde(rename = "swagger-json")]
SwaggerJson,
#[serde(rename = "swagger-link-json")]
SwaggerLinkJson,
#[serde(rename = "wsdl")]
Wsdl,
#[serde(rename = "wsdl-link")]
WsdlLink,
#[serde(rename = "openapi")]
Openapi,
#[serde(rename = "openapi+json")]
OpenapiJson,
#[serde(rename = "openapi-link")]
OpenapiLink,
#[serde(rename = "openapi+json-link")]
OpenapiJsonLink,
#[serde(rename = "graphql-link")]
GraphqlLink,
}
/// Selects which WSDL service/endpoint to import from a WSDL document.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct WsdlSelector {
#[serde(rename = "wsdlServiceName", default, skip_serializing_if = "Option::is_none")]
pub wsdl_service_name: Option<String>,
#[serde(rename = "wsdlEndpointName", default, skip_serializing_if = "Option::is_none")]
pub wsdl_endpoint_name: Option<String>,
}
/// Kind of API being created.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ApiType {
#[serde(rename = "http")]
Http,
#[serde(rename = "soap")]
Soap,
#[serde(rename = "websocket")]
Websocket,
#[serde(rename = "graphql")]
Graphql,
}
}
/// Fields common to every API representation (flattened into the concrete
/// contract/property types above and below).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiEntityBaseContract {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(rename = "authenticationSettings", default, skip_serializing_if = "Option::is_none")]
pub authentication_settings: Option<AuthenticationSettingsContract>,
#[serde(rename = "subscriptionKeyParameterNames", default, skip_serializing_if = "Option::is_none")]
pub subscription_key_parameter_names: Option<SubscriptionKeyParameterNamesContract>,
// `type` is a Rust keyword, hence the trailing underscore + serde rename.
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<api_entity_base_contract::Type>,
#[serde(rename = "apiRevision", default, skip_serializing_if = "Option::is_none")]
pub api_revision: Option<String>,
#[serde(rename = "apiVersion", default, skip_serializing_if = "Option::is_none")]
pub api_version: Option<String>,
#[serde(rename = "isCurrent", default, skip_serializing_if = "Option::is_none")]
pub is_current: Option<bool>,
#[serde(rename = "isOnline", default, skip_serializing_if = "Option::is_none")]
pub is_online: Option<bool>,
#[serde(rename = "apiRevisionDescription", default, skip_serializing_if = "Option::is_none")]
pub api_revision_description: Option<String>,
#[serde(rename = "apiVersionDescription", default, skip_serializing_if = "Option::is_none")]
pub api_version_description: Option<String>,
#[serde(rename = "apiVersionSetId", default, skip_serializing_if = "Option::is_none")]
pub api_version_set_id: Option<String>,
#[serde(rename = "subscriptionRequired", default, skip_serializing_if = "Option::is_none")]
pub subscription_required: Option<bool>,
#[serde(rename = "termsOfServiceUrl", default, skip_serializing_if = "Option::is_none")]
pub terms_of_service_url: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub contact: Option<ApiContactInformation>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub license: Option<ApiLicenseInformation>,
}
pub mod api_entity_base_contract {
use super::*;
/// Kind of API (wire values are lowercase).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Type {
#[serde(rename = "http")]
Http,
#[serde(rename = "soap")]
Soap,
#[serde(rename = "websocket")]
Websocket,
#[serde(rename = "graphql")]
Graphql,
}
}
/// Contact details published with an API.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiContactInformation {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub url: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub email: Option<String>,
}
/// License details published with an API.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiLicenseInformation {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub url: Option<String>,
}
/// Result of exporting an API definition; `value.link` points at the
/// exported document.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiExportResult {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub format: Option<api_export_result::Format>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub value: Option<api_export_result::Value>,
}
pub mod api_export_result {
use super::*;
/// Export format (link-based variants only).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Format {
#[serde(rename = "swagger-link-json")]
SwaggerLinkJson,
#[serde(rename = "wadl-link-json")]
WadlLinkJson,
#[serde(rename = "wsdl-link+xml")]
WsdlLinkXml,
#[serde(rename = "openapi-link")]
OpenapiLink,
}
/// Wrapper holding the link to the exported definition.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Value {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub link: Option<String>,
}
}
// API release contracts.
/// Paged list of API releases.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiReleaseCollection {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<ApiReleaseContract>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub count: Option<i64>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
/// An API release as an ARM resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiReleaseContract {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<ApiReleaseContractProperties>,
}
// NOTE(review): timestamps are modeled as plain strings by the generator.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiReleaseContractProperties {
#[serde(rename = "apiId", default, skip_serializing_if = "Option::is_none")]
pub api_id: Option<String>,
#[serde(rename = "createdDateTime", default, skip_serializing_if = "Option::is_none")]
pub created_date_time: Option<String>,
#[serde(rename = "updatedDateTime", default, skip_serializing_if = "Option::is_none")]
pub updated_date_time: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub notes: Option<String>,
}
// API revision contracts.
/// Paged list of API revisions.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiRevisionCollection {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<ApiRevisionContract>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub count: Option<i64>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
/// A single API revision (not an ARM resource — no flattened `Resource`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiRevisionContract {
#[serde(rename = "apiId", default, skip_serializing_if = "Option::is_none")]
pub api_id: Option<String>,
#[serde(rename = "apiRevision", default, skip_serializing_if = "Option::is_none")]
pub api_revision: Option<String>,
#[serde(rename = "createdDateTime", default, skip_serializing_if = "Option::is_none")]
pub created_date_time: Option<String>,
#[serde(rename = "updatedDateTime", default, skip_serializing_if = "Option::is_none")]
pub updated_date_time: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(rename = "privateUrl", default, skip_serializing_if = "Option::is_none")]
pub private_url: Option<String>,
#[serde(rename = "isOnline", default, skip_serializing_if = "Option::is_none")]
pub is_online: Option<bool>,
#[serde(rename = "isCurrent", default, skip_serializing_if = "Option::is_none")]
pub is_current: Option<bool>,
}
/// Parameters describing a new revision of an existing API.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiRevisionInfoContract {
#[serde(rename = "sourceApiId", default, skip_serializing_if = "Option::is_none")]
pub source_api_id: Option<String>,
#[serde(rename = "apiVersionName", default, skip_serializing_if = "Option::is_none")]
pub api_version_name: Option<String>,
#[serde(rename = "apiRevisionDescription", default, skip_serializing_if = "Option::is_none")]
pub api_revision_description: Option<String>,
#[serde(rename = "apiVersionSet", default, skip_serializing_if = "Option::is_none")]
pub api_version_set: Option<ApiVersionSetContractDetails>,
}
/// API properties as exposed on a tag resource (path/display fields are
/// optional here, unlike `ApiContractProperties`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiTagResourceContractProperties {
#[serde(flatten)]
pub api_entity_base_contract: ApiEntityBaseContract,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "serviceUrl", default, skip_serializing_if = "Option::is_none")]
pub service_url: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub path: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub protocols: Vec<String>,
}
/// Request body for patching an API; every field is optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiUpdateContract {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<ApiContractUpdateProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiContractUpdateProperties {
#[serde(flatten)]
pub api_entity_base_contract: ApiEntityBaseContract,
#[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
pub display_name: Option<String>,
#[serde(rename = "serviceUrl", default, skip_serializing_if = "Option::is_none")]
pub service_url: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub path: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub protocols: Vec<String>,
}
// API version-set contracts.
/// Paged list of API version sets.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiVersionSetCollection {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<ApiVersionSetContract>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub count: Option<i64>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
/// An API version set as an ARM resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiVersionSetContract {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<ApiVersionSetContractProperties>,
}
/// Inline version-set details embedded in API contracts.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiVersionSetContractDetails {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(rename = "versioningScheme", default, skip_serializing_if = "Option::is_none")]
pub versioning_scheme: Option<api_version_set_contract_details::VersioningScheme>,
#[serde(rename = "versionQueryName", default, skip_serializing_if = "Option::is_none")]
pub version_query_name: Option<String>,
#[serde(rename = "versionHeaderName", default, skip_serializing_if = "Option::is_none")]
pub version_header_name: Option<String>,
}
pub mod api_version_set_contract_details {
use super::*;
/// Where the API version is carried: URL segment, query string, or header.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum VersioningScheme {
Segment,
Query,
Header,
}
}
/// Version-set properties; `displayName` and `versioningScheme` are required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiVersionSetContractProperties {
#[serde(flatten)]
pub api_version_set_entity_base: ApiVersionSetEntityBase,
#[serde(rename = "displayName")]
pub display_name: String,
#[serde(rename = "versioningScheme")]
pub versioning_scheme: api_version_set_contract_properties::VersioningScheme,
}
pub mod api_version_set_contract_properties {
use super::*;
/// Where the API version is carried: URL segment, query string, or header.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum VersioningScheme {
Segment,
Query,
Header,
}
}
/// Fields shared by version-set contract and update-parameter types.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiVersionSetEntityBase {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(rename = "versionQueryName", default, skip_serializing_if = "Option::is_none")]
pub version_query_name: Option<String>,
#[serde(rename = "versionHeaderName", default, skip_serializing_if = "Option::is_none")]
pub version_header_name: Option<String>,
}
/// Request body for patching a version set; all fields optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiVersionSetUpdateParameters {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<ApiVersionSetUpdateParametersProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiVersionSetUpdateParametersProperties {
#[serde(flatten)]
pub api_version_set_entity_base: ApiVersionSetEntityBase,
#[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
pub display_name: Option<String>,
#[serde(rename = "versioningScheme", default, skip_serializing_if = "Option::is_none")]
pub versioning_scheme: Option<api_version_set_update_parameters_properties::VersioningScheme>,
}
pub mod api_version_set_update_parameters_properties {
use super::*;
/// Where the API version is carried: URL segment, query string, or header.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum VersioningScheme {
Segment,
Query,
Header,
}
}
/// OAuth2 / OpenID authentication settings attached to an API.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AuthenticationSettingsContract {
#[serde(rename = "oAuth2", default, skip_serializing_if = "Option::is_none")]
pub o_auth2: Option<OAuth2AuthenticationSettingsContract>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub openid: Option<OpenIdAuthenticationSettingsContract>,
}
// OAuth2 authorization-server contracts.
/// Paged list of authorization servers.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AuthorizationServerCollection {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<AuthorizationServerContract>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub count: Option<i64>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
/// An authorization server as an ARM resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AuthorizationServerContract {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<AuthorizationServerContractProperties>,
}
/// Optional OAuth2 settings shared by the full and update property types.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AuthorizationServerContractBaseProperties {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(rename = "authorizationMethods", default, skip_serializing_if = "Vec::is_empty")]
pub authorization_methods: Vec<String>,
#[serde(rename = "clientAuthenticationMethod", default, skip_serializing_if = "Vec::is_empty")]
pub client_authentication_method: Vec<String>,
#[serde(rename = "tokenBodyParameters", default, skip_serializing_if = "Vec::is_empty")]
pub token_body_parameters: Vec<TokenBodyParameterContract>,
#[serde(rename = "tokenEndpoint", default, skip_serializing_if = "Option::is_none")]
pub token_endpoint: Option<String>,
#[serde(rename = "supportState", default, skip_serializing_if = "Option::is_none")]
pub support_state: Option<bool>,
#[serde(rename = "defaultScope", default, skip_serializing_if = "Option::is_none")]
pub default_scope: Option<String>,
#[serde(rename = "bearerTokenSendingMethods", default, skip_serializing_if = "Vec::is_empty")]
pub bearer_token_sending_methods: Vec<String>,
#[serde(rename = "resourceOwnerUsername", default, skip_serializing_if = "Option::is_none")]
pub resource_owner_username: Option<String>,
#[serde(rename = "resourceOwnerPassword", default, skip_serializing_if = "Option::is_none")]
pub resource_owner_password: Option<String>,
}
/// Full authorization-server properties; endpoint/client fields required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AuthorizationServerContractProperties {
#[serde(flatten)]
pub authorization_server_contract_base_properties: AuthorizationServerContractBaseProperties,
#[serde(rename = "displayName")]
pub display_name: String,
#[serde(rename = "clientRegistrationEndpoint")]
pub client_registration_endpoint: String,
#[serde(rename = "authorizationEndpoint")]
pub authorization_endpoint: String,
#[serde(rename = "grantTypes")]
pub grant_types: Vec<String>,
#[serde(rename = "clientId")]
pub client_id: String,
#[serde(rename = "clientSecret", default, skip_serializing_if = "Option::is_none")]
pub client_secret: Option<String>,
}
/// Patch body for an authorization server; same fields, all optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AuthorizationServerUpdateContract {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<AuthorizationServerUpdateContractProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AuthorizationServerUpdateContractProperties {
#[serde(flatten)]
pub authorization_server_contract_base_properties: AuthorizationServerContractBaseProperties,
#[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
pub display_name: Option<String>,
#[serde(rename = "clientRegistrationEndpoint", default, skip_serializing_if = "Option::is_none")]
pub client_registration_endpoint: Option<String>,
#[serde(rename = "authorizationEndpoint", default, skip_serializing_if = "Option::is_none")]
pub authorization_endpoint: Option<String>,
#[serde(rename = "grantTypes", default, skip_serializing_if = "Vec::is_empty")]
pub grant_types: Vec<String>,
#[serde(rename = "clientId", default, skip_serializing_if = "Option::is_none")]
pub client_id: Option<String>,
#[serde(rename = "clientSecret", default, skip_serializing_if = "Option::is_none")]
pub client_secret: Option<String>,
}
/// Secret material of an authorization server (returned by the secrets API).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AuthorizationServerSecretsContract {
#[serde(rename = "clientSecret", default, skip_serializing_if = "Option::is_none")]
pub client_secret: Option<String>,
#[serde(rename = "resourceOwnerUsername", default, skip_serializing_if = "Option::is_none")]
pub resource_owner_username: Option<String>,
#[serde(rename = "resourceOwnerPassword", default, skip_serializing_if = "Option::is_none")]
pub resource_owner_password: Option<String>,
}
// Backend contracts.
/// `Authorization` header credentials for a backend (both fields required).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BackendAuthorizationHeaderCredentials {
pub scheme: String,
pub parameter: String,
}
/// Optional backend settings shared by the contract/update property types.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BackendBaseParameters {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub title: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
#[serde(rename = "resourceId", default, skip_serializing_if = "Option::is_none")]
pub resource_id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<BackendProperties>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub credentials: Option<BackendCredentialsContract>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub proxy: Option<BackendProxyContract>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tls: Option<BackendTlsProperties>,
}
/// Paged list of backends.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BackendCollection {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<BackendContract>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub count: Option<i64>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
/// A backend as an ARM resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BackendContract {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<BackendContractProperties>,
}
/// Backend properties; `url` and `protocol` are required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BackendContractProperties {
#[serde(flatten)]
pub backend_base_parameters: BackendBaseParameters,
pub url: String,
pub protocol: backend_contract_properties::Protocol,
}
pub mod backend_contract_properties {
use super::*;
/// Backend communication protocol.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Protocol {
#[serde(rename = "http")]
Http,
#[serde(rename = "soap")]
Soap,
}
}
/// Credentials used when calling the backend.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BackendCredentialsContract {
#[serde(rename = "certificateIds", default, skip_serializing_if = "Vec::is_empty")]
pub certificate_ids: Vec<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub certificate: Vec<String>,
// `query`/`header` are free-form name -> values maps on the wire.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub query: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub header: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub authorization: Option<BackendAuthorizationHeaderCredentials>,
}
/// Backend type-specific properties (currently Service Fabric only).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BackendProperties {
#[serde(rename = "serviceFabricCluster", default, skip_serializing_if = "Option::is_none")]
pub service_fabric_cluster: Option<BackendServiceFabricClusterProperties>,
}
/// Proxy to route backend requests through.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BackendProxyContract {
pub url: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub username: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub password: Option<String>,
}
/// Request to reconnect a backend as an ARM resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BackendReconnectContract {
#[serde(flatten)]
pub resource: Resource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<BackendReconnectProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BackendReconnectProperties {
// Delay before reconnecting (string-typed on the wire, e.g. a duration).
#[serde(default, skip_serializing_if = "Option::is_none")]
pub after: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BackendServiceFabricClusterProperties {
#[serde(rename = "clientCertificateId", default, skip_serializing_if = "Option::is_none")]
pub client_certificate_id: Option<String>,
#[serde(rename = "clientCertificatethumbprint", default, skip_serializing_if = "Option::is_none")]
pub client_certificatethumbprint: Option<String>,
#[serde(rename = "maxPartitionResolutionRetries", default, skip_serializing_if = "Option::is_none")]
pub max_partition_resolution_retries: Option<i32>,
#[serde(rename = "managementEndpoints")]
pub management_endpoints: Vec<String>,
#[serde(rename = "serverCertificateThumbprints", default, skip_serializing_if = "Vec::is_empty")]
pub server_certificate_thumbprints: Vec<String>,
#[serde(rename = "serverX509Names", default, skip_serializing_if = "Vec::is_empty")]
pub server_x509_names: Vec<X509CertificateName>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BackendTlsProperties {
#[serde(rename = "validateCertificateChain", default, skip_serializing_if = "Option::is_none")]
pub validate_certificate_chain: Option<bool>,
#[serde(rename = "validateCertificateName", default, skip_serializing_if = "Option::is_none")]
pub validate_certificate_name: Option<bool>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BackendUpdateParameterProperties {
#[serde(flatten)]
pub backend_base_parameters: BackendBaseParameters,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub url: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub protocol: Option<backend_update_parameter_properties::Protocol>,
}
pub mod backend_update_parameter_properties {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Protocol {
#[serde(rename = "http")]
Http,
#[serde(rename = "soap")]
Soap,
}
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BackendUpdateParameters {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<BackendUpdateParameterProperties>,
}
/// How a bearer token is passed to the API: in the `Authorization` header
/// or as a query parameter. Wire values are camelCase via `rename`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum BearerTokenSendingMethodsContract {
    #[serde(rename = "authorizationHeader")]
    AuthorizationHeader,
    #[serde(rename = "query")]
    Query,
}
/// Diagnostic setting for message bodies: `bytes` is presumably the number
/// of leading body bytes to log (NOTE(review): confirm unit/meaning).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BodyDiagnosticSettings {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub bytes: Option<i32>,
}
// --- Cache entity models ---------------------------------------------------

/// List-operation response for caches.
/// NOTE(review): `count`/`nextLink` presumably follow the ARM paged-list
/// convention — confirm.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CacheCollection {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<CacheContract>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub count: Option<i64>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}

/// A cache resource; `Resource` fields are flattened into the JSON object.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CacheContract {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<CacheContractProperties>,
}

/// Cache properties; `connection_string` and `use_from_location` are
/// required on create.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CacheContractProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(rename = "connectionString")]
    pub connection_string: String,
    #[serde(rename = "useFromLocation")]
    pub use_from_location: String,
    #[serde(rename = "resourceId", default, skip_serializing_if = "Option::is_none")]
    pub resource_id: Option<String>,
}

/// Envelope for a cache update (PATCH) request body.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CacheUpdateParameters {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<CacheUpdateProperties>,
}

/// PATCH-style cache properties: all fields optional, omitted when unset.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CacheUpdateProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(rename = "connectionString", default, skip_serializing_if = "Option::is_none")]
    pub connection_string: Option<String>,
    #[serde(rename = "useFromLocation", default, skip_serializing_if = "Option::is_none")]
    pub use_from_location: Option<String>,
    #[serde(rename = "resourceId", default, skip_serializing_if = "Option::is_none")]
    pub resource_id: Option<String>,
}
// --- Certificate entity models ---------------------------------------------

/// List-operation response for certificates (ARM-style paged list —
/// NOTE(review): `count`/`nextLink` semantics inferred, confirm).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CertificateCollection {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<CertificateContract>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub count: Option<i64>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}

/// A certificate resource; `Resource` fields are flattened.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CertificateContract {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<CertificateContractProperties>,
}

/// Certificate details as returned by the service. `expiration_date` is a
/// string on the wire — presumably ISO-8601 (NOTE(review): confirm format).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CertificateContractProperties {
    pub subject: String,
    pub thumbprint: String,
    #[serde(rename = "expirationDate")]
    pub expiration_date: String,
    /// Present when the certificate is sourced from a Key Vault.
    #[serde(rename = "keyVault", default, skip_serializing_if = "Option::is_none")]
    pub key_vault: Option<KeyVaultContractProperties>,
}

/// Envelope for a certificate create/update request body.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CertificateCreateOrUpdateParameters {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<CertificateCreateOrUpdateProperties>,
}

/// Create/update payload: raw certificate `data` + `password`, or a
/// Key Vault reference — both paths optional at the type level.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CertificateCreateOrUpdateProperties {
    /// Presumably base64-encoded certificate bytes — NOTE(review): confirm.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub data: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<String>,
    #[serde(rename = "keyVault", default, skip_serializing_if = "Option::is_none")]
    pub key_vault: Option<KeyVaultContractCreateProperties>,
}
// --- Data-masking models ---------------------------------------------------

/// Masking rules applied to diagnostics: separate entity lists for query
/// parameters and headers.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataMasking {
    #[serde(rename = "queryParams", default, skip_serializing_if = "Vec::is_empty")]
    pub query_params: Vec<DataMaskingEntity>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub headers: Vec<DataMaskingEntity>,
}

/// One masking rule: the parameter/header name in `value` and the masking
/// `mode` to apply. NOTE(review): `value` being the *name* of the masked
/// field is inferred — confirm against the API spec.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DataMaskingEntity {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub value: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub mode: Option<data_masking_entity::Mode>,
}
/// Enum namespace for [`DataMaskingEntity`]. Note: no `rename` attributes —
/// the wire values are the PascalCase variant names `Mask`/`Hide`.
pub mod data_masking_entity {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Mode {
        Mask,
        Hide,
    }
}
/// Envelope for a deploy-configuration request body.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DeployConfigurationParameters {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<DeployConfigurationParameterProperties>,
}

/// Deploy payload: the source-control `branch` to deploy from (required)
/// and an optional `force` flag.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DeployConfigurationParameterProperties {
    pub branch: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub force: Option<bool>,
}
// --- Diagnostic entity models ----------------------------------------------

/// List-operation response for diagnostics (ARM-style paged list —
/// NOTE(review): `count`/`nextLink` semantics inferred, confirm).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DiagnosticCollection {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<DiagnosticContract>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub count: Option<i64>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}

/// A diagnostic resource; `Resource` fields are flattened.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DiagnosticContract {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<DiagnosticContractProperties>,
}

/// Diagnostic settings: which logger to send to (`logger_id`, required),
/// sampling, and per-direction (`frontend`/`backend`) pipeline settings.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DiagnosticContractProperties {
    #[serde(rename = "alwaysLog", default, skip_serializing_if = "Option::is_none")]
    pub always_log: Option<diagnostic_contract_properties::AlwaysLog>,
    /// Resource id of the target logger — the only required field.
    #[serde(rename = "loggerId")]
    pub logger_id: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub sampling: Option<SamplingSettings>,
    /// Settings for the caller-facing (frontend) leg of the request.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub frontend: Option<PipelineDiagnosticSettings>,
    /// Settings for the backend-facing leg of the request.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub backend: Option<PipelineDiagnosticSettings>,
    #[serde(rename = "logClientIp", default, skip_serializing_if = "Option::is_none")]
    pub log_client_ip: Option<bool>,
    #[serde(rename = "httpCorrelationProtocol", default, skip_serializing_if = "Option::is_none")]
    pub http_correlation_protocol: Option<diagnostic_contract_properties::HttpCorrelationProtocol>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub verbosity: Option<diagnostic_contract_properties::Verbosity>,
    #[serde(rename = "operationNameFormat", default, skip_serializing_if = "Option::is_none")]
    pub operation_name_format: Option<diagnostic_contract_properties::OperationNameFormat>,
}
/// Enum namespace for [`DiagnosticContractProperties`]. Mixed casing on the
/// wire: some enums use camelCase renames, others serialize the PascalCase
/// variant name as-is — each matches its spec definition.
pub mod diagnostic_contract_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum AlwaysLog {
        #[serde(rename = "allErrors")]
        AllErrors,
    }
    /// Correlation header scheme; `W3C` is the exact wire value.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum HttpCorrelationProtocol {
        None,
        Legacy,
        #[serde(rename = "W3C")]
        W3c,
    }
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Verbosity {
        #[serde(rename = "verbose")]
        Verbose,
        #[serde(rename = "information")]
        Information,
        #[serde(rename = "error")]
        Error,
    }
    /// No renames: wire values are `Name` / `Url`.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum OperationNameFormat {
        Name,
        Url,
    }
}
// --- Email-template entity models ------------------------------------------

/// List-operation response for email templates (ARM-style paged list —
/// NOTE(review): `count`/`nextLink` semantics inferred, confirm).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EmailTemplateCollection {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<EmailTemplateContract>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub count: Option<i64>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}

/// An email-template resource; `Resource` fields are flattened.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EmailTemplateContract {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<EmailTemplateContractProperties>,
}

/// Template content: `subject` and `body` are required; `is_default`
/// indicates the built-in (unmodified) template; `parameters` lists the
/// substitution placeholders available in the template.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EmailTemplateContractProperties {
    pub subject: String,
    pub body: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub title: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(rename = "isDefault", default, skip_serializing_if = "Option::is_none")]
    pub is_default: Option<bool>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub parameters: Vec<EmailTemplateParametersContractProperties>,
}

/// Description of one template substitution parameter.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EmailTemplateParametersContractProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub title: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
}

/// PATCH-style template properties: every field optional, omitted when
/// unset/empty.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EmailTemplateUpdateParameterProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub subject: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub title: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub body: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub parameters: Vec<EmailTemplateParametersContractProperties>,
}

/// Envelope for a template update (PATCH) request body.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EmailTemplateUpdateParameters {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<EmailTemplateUpdateParameterProperties>,
}
/// Response of a generate-SSO-URL operation; `value` holds the URL string.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GenerateSsoUrlResult {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub value: Option<String>,
}
// --- Group entity models ---------------------------------------------------

/// List-operation response for groups (ARM-style paged list —
/// NOTE(review): `count`/`nextLink` semantics inferred, confirm).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GroupCollection {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<GroupContract>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub count: Option<i64>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}

/// A group resource; `Resource` fields are flattened.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GroupContract {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<GroupContractProperties>,
}

/// Group properties as read from the service; `display_name` is required.
/// `type_` carries a trailing underscore because `type` is a Rust keyword;
/// it still serializes as `"type"`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GroupContractProperties {
    #[serde(rename = "displayName")]
    pub display_name: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    /// True for service-managed built-in groups.
    #[serde(rename = "builtIn", default, skip_serializing_if = "Option::is_none")]
    pub built_in: Option<bool>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<group_contract_properties::Type>,
    /// Identifier in the external identity provider, for `external` groups.
    #[serde(rename = "externalId", default, skip_serializing_if = "Option::is_none")]
    pub external_id: Option<String>,
}
/// Enum namespace for [`GroupContractProperties`].
pub mod group_contract_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Type {
        #[serde(rename = "custom")]
        Custom,
        #[serde(rename = "system")]
        System,
        #[serde(rename = "external")]
        External,
    }
}

/// Envelope for a group create request body.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GroupCreateParameters {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<GroupCreateParametersProperties>,
}

/// Create payload: `display_name` required; no `built_in` field (built-in
/// groups cannot be created), otherwise mirrors the read contract.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GroupCreateParametersProperties {
    #[serde(rename = "displayName")]
    pub display_name: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<group_create_parameters_properties::Type>,
    #[serde(rename = "externalId", default, skip_serializing_if = "Option::is_none")]
    pub external_id: Option<String>,
}
/// Enum namespace for [`GroupCreateParametersProperties`]; duplicates `Type`
/// (generated-per-struct pattern).
pub mod group_create_parameters_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Type {
        #[serde(rename = "custom")]
        Custom,
        #[serde(rename = "system")]
        System,
        #[serde(rename = "external")]
        External,
    }
}

/// Envelope for a group update (PATCH) request body.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GroupUpdateParameters {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<GroupUpdateParametersProperties>,
}

/// PATCH-style group properties: like the create payload but with
/// `display_name` optional as well.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GroupUpdateParametersProperties {
    #[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
    pub display_name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<group_update_parameters_properties::Type>,
    #[serde(rename = "externalId", default, skip_serializing_if = "Option::is_none")]
    pub external_id: Option<String>,
}
/// Enum namespace for [`GroupUpdateParametersProperties`].
pub mod group_update_parameters_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Type {
        #[serde(rename = "custom")]
        Custom,
        #[serde(rename = "system")]
        System,
        #[serde(rename = "external")]
        External,
    }
}
/// Diagnostic settings for one HTTP message direction: which `headers` to
/// log, how much of the `body`, and what to mask.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HttpMessageDiagnostic {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub headers: Vec<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub body: Option<BodyDiagnosticSettings>,
    #[serde(rename = "dataMasking", default, skip_serializing_if = "Option::is_none")]
    pub data_masking: Option<DataMasking>,
}
// --- Identity-provider entity models ---------------------------------------

/// Fields shared by identity-provider payloads. The `*_policy_name` fields
/// are presumably AAD B2C user-flow names (only meaningful for the
/// `aadB2C` provider type) — NOTE(review): confirm against the API spec.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IdentityProviderBaseParameters {
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<identity_provider_base_parameters::Type>,
    #[serde(rename = "signinTenant", default, skip_serializing_if = "Option::is_none")]
    pub signin_tenant: Option<String>,
    #[serde(rename = "allowedTenants", default, skip_serializing_if = "Vec::is_empty")]
    pub allowed_tenants: Vec<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub authority: Option<String>,
    #[serde(rename = "signupPolicyName", default, skip_serializing_if = "Option::is_none")]
    pub signup_policy_name: Option<String>,
    #[serde(rename = "signinPolicyName", default, skip_serializing_if = "Option::is_none")]
    pub signin_policy_name: Option<String>,
    #[serde(rename = "profileEditingPolicyName", default, skip_serializing_if = "Option::is_none")]
    pub profile_editing_policy_name: Option<String>,
    #[serde(rename = "passwordResetPolicyName", default, skip_serializing_if = "Option::is_none")]
    pub password_reset_policy_name: Option<String>,
}
/// Enum namespace for [`IdentityProviderBaseParameters`].
pub mod identity_provider_base_parameters {
    use super::*;
    /// Supported identity-provider kinds; wire values are camelCase.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Type {
        #[serde(rename = "facebook")]
        Facebook,
        #[serde(rename = "google")]
        Google,
        #[serde(rename = "microsoft")]
        Microsoft,
        #[serde(rename = "twitter")]
        Twitter,
        #[serde(rename = "aad")]
        Aad,
        #[serde(rename = "aadB2C")]
        AadB2c,
    }
}

/// Create-request resource for an identity provider (`Resource` flattened).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IdentityProviderCreateContract {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<IdentityProviderCreateContractProperties>,
}

/// Create payload: base fields (flattened) plus REQUIRED `client_id` and
/// `client_secret` — unlike the read contract, the secret is mandatory here.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IdentityProviderCreateContractProperties {
    #[serde(flatten)]
    pub identity_provider_base_parameters: IdentityProviderBaseParameters,
    #[serde(rename = "clientId")]
    pub client_id: String,
    #[serde(rename = "clientSecret")]
    pub client_secret: String,
}

/// An identity-provider resource as read back (`Resource` flattened).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IdentityProviderContract {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<IdentityProviderContractProperties>,
}

/// Read contract: `client_secret` is optional here (the service may omit
/// the secret on reads), while `client_id` remains required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IdentityProviderContractProperties {
    #[serde(flatten)]
    pub identity_provider_base_parameters: IdentityProviderBaseParameters,
    #[serde(rename = "clientId")]
    pub client_id: String,
    #[serde(rename = "clientSecret", default, skip_serializing_if = "Option::is_none")]
    pub client_secret: Option<String>,
}

/// List-operation response for identity providers (ARM-style paged list —
/// NOTE(review): `count`/`nextLink` semantics inferred, confirm).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IdentityProviderList {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<IdentityProviderContract>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub count: Option<i64>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}

/// Envelope for an identity-provider update (PATCH) request body.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IdentityProviderUpdateParameters {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<IdentityProviderUpdateProperties>,
}

/// PATCH-style properties: both `client_id` and `client_secret` optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IdentityProviderUpdateProperties {
    #[serde(flatten)]
    pub identity_provider_base_parameters: IdentityProviderBaseParameters,
    #[serde(rename = "clientId", default, skip_serializing_if = "Option::is_none")]
    pub client_id: Option<String>,
    #[serde(rename = "clientSecret", default, skip_serializing_if = "Option::is_none")]
    pub client_secret: Option<String>,
}
// --- Issue / issue-attachment / issue-comment entity models ----------------

/// List-operation response for issue attachments (ARM-style paged list —
/// NOTE(review): `count`/`nextLink` semantics inferred, confirm).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IssueAttachmentCollection {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<IssueAttachmentContract>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub count: Option<i64>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}

/// An issue-attachment resource; `Resource` fields are flattened.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IssueAttachmentContract {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<IssueAttachmentContractProperties>,
}

/// Attachment payload: all three fields required. `content_format`
/// presumably describes how `content` is encoded (e.g. a MIME type or
/// link vs. inline data) — NOTE(review): confirm.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IssueAttachmentContractProperties {
    pub title: String,
    #[serde(rename = "contentFormat")]
    pub content_format: String,
    pub content: String,
}

/// List-operation response for issues.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IssueCollection {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<IssueContract>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub count: Option<i64>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}

/// List-operation response for issue comments.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IssueCommentCollection {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<IssueCommentContract>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub count: Option<i64>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}

/// An issue-comment resource; `Resource` fields are flattened.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IssueCommentContract {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<IssueCommentContractProperties>,
}

/// Comment payload: `text` and authoring `user_id` required;
/// `created_date` is a wire string (presumably ISO-8601 — confirm).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IssueCommentContractProperties {
    pub text: String,
    #[serde(rename = "createdDate", default, skip_serializing_if = "Option::is_none")]
    pub created_date: Option<String>,
    #[serde(rename = "userId")]
    pub user_id: String,
}

/// An issue resource; `Resource` fields are flattened.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IssueContract {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<IssueContractProperties>,
}

/// Fields shared by issue read/update payloads.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IssueContractBaseProperties {
    #[serde(rename = "createdDate", default, skip_serializing_if = "Option::is_none")]
    pub created_date: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub state: Option<issue_contract_base_properties::State>,
    /// Resource id of the API the issue was filed against.
    #[serde(rename = "apiId", default, skip_serializing_if = "Option::is_none")]
    pub api_id: Option<String>,
}
/// Enum namespace for [`IssueContractBaseProperties`].
pub mod issue_contract_base_properties {
    use super::*;
    /// Issue lifecycle state; wire values are lowercase.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum State {
        #[serde(rename = "proposed")]
        Proposed,
        #[serde(rename = "open")]
        Open,
        #[serde(rename = "removed")]
        Removed,
        #[serde(rename = "resolved")]
        Resolved,
        #[serde(rename = "closed")]
        Closed,
    }
}

/// Full issue properties: base fields (flattened) plus required `title`,
/// `description`, and authoring `user_id`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IssueContractProperties {
    #[serde(flatten)]
    pub issue_contract_base_properties: IssueContractBaseProperties,
    pub title: String,
    pub description: String,
    #[serde(rename = "userId")]
    pub user_id: String,
}

/// Envelope for an issue update (PATCH) request body.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IssueUpdateContract {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<IssueUpdateContractProperties>,
}

/// PATCH-style issue properties: same shape as
/// [`IssueContractProperties`] but with every extra field optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IssueUpdateContractProperties {
    #[serde(flatten)]
    pub issue_contract_base_properties: IssueContractBaseProperties,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub title: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(rename = "userId", default, skip_serializing_if = "Option::is_none")]
    pub user_id: Option<String>,
}
// --- Key Vault reference models --------------------------------------------

/// Key Vault reference used on create/update: the secret URL plus the
/// client id of the managed identity used to access it.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct KeyVaultContractCreateProperties {
    #[serde(rename = "secretIdentifier", default, skip_serializing_if = "Option::is_none")]
    pub secret_identifier: Option<String>,
    #[serde(rename = "identityClientId", default, skip_serializing_if = "Option::is_none")]
    pub identity_client_id: Option<String>,
}

/// Key Vault reference as read back: the create fields (flattened) plus the
/// status of the most recent secret fetch.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct KeyVaultContractProperties {
    #[serde(flatten)]
    pub key_vault_contract_create_properties: KeyVaultContractCreateProperties,
    #[serde(rename = "lastStatus", default, skip_serializing_if = "Option::is_none")]
    pub last_status: Option<KeyVaultLastAccessStatusContractProperties>,
}

/// Outcome of the last Key Vault access attempt: status `code`, optional
/// error `message`, and a UTC timestamp string.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct KeyVaultLastAccessStatusContractProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub code: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,
    #[serde(rename = "timeStampUtc", default, skip_serializing_if = "Option::is_none")]
    pub time_stamp_utc: Option<String>,
}
// --- Logger entity models --------------------------------------------------

/// List-operation response for loggers (ARM-style paged list —
/// NOTE(review): `count`/`nextLink` semantics inferred, confirm).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LoggerCollection {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<LoggerContract>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub count: Option<i64>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}

/// A logger resource; `Resource` fields are flattened.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LoggerContract {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<LoggerContractProperties>,
}

/// Logger settings: the destination kind (`logger_type`, required) and its
/// free-form `credentials` object, whose shape varies by logger type.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LoggerContractProperties {
    #[serde(rename = "loggerType")]
    pub logger_type: logger_contract_properties::LoggerType,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    /// Type-dependent JSON object (e.g. connection string / instrumentation
    /// key) — NOTE(review): exact keys per type not visible here; confirm.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub credentials: Option<serde_json::Value>,
    #[serde(rename = "isBuffered", default, skip_serializing_if = "Option::is_none")]
    pub is_buffered: Option<bool>,
    #[serde(rename = "resourceId", default, skip_serializing_if = "Option::is_none")]
    pub resource_id: Option<String>,
}
/// Enum namespace for [`LoggerContractProperties`].
pub mod logger_contract_properties {
    use super::*;
    /// Supported log sinks; wire values are camelCase.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum LoggerType {
        #[serde(rename = "azureEventHub")]
        AzureEventHub,
        #[serde(rename = "applicationInsights")]
        ApplicationInsights,
        #[serde(rename = "azureMonitor")]
        AzureMonitor,
    }
}

/// Envelope for a logger update (PATCH) request body.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LoggerUpdateContract {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<LoggerUpdateParameters>,
}

/// PATCH-style logger properties: every field optional; no `resource_id`
/// here (it is not updatable via PATCH in this contract).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct LoggerUpdateParameters {
    #[serde(rename = "loggerType", default, skip_serializing_if = "Option::is_none")]
    pub logger_type: Option<logger_update_parameters::LoggerType>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub credentials: Option<serde_json::Value>,
    #[serde(rename = "isBuffered", default, skip_serializing_if = "Option::is_none")]
    pub is_buffered: Option<bool>,
}
/// Enum namespace for [`LoggerUpdateParameters`]; duplicates `LoggerType`
/// (generated-per-struct pattern).
pub mod logger_update_parameters {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum LoggerType {
        #[serde(rename = "azureEventHub")]
        AzureEventHub,
        #[serde(rename = "applicationInsights")]
        ApplicationInsights,
        #[serde(rename = "azureMonitor")]
        AzureMonitor,
    }
}
/// Paged list of Notification entities. `next_link` (wire: `nextLink`)
/// carries the continuation URL when further pages exist.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NotificationCollection {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<NotificationContract>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub count: Option<i64>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Notification entity wrapping the ARM resource envelope.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NotificationContract {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<NotificationContractProperties>,
}
/// Properties of a Notification; `title` is the only required field.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NotificationContractProperties {
    pub title: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    /// Email and user recipients for this notification.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub recipients: Option<RecipientsContractProperties>,
}
/// API OAuth2 authentication settings: which authorization server applies
/// and the operations scope to request.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OAuth2AuthenticationSettingsContract {
    #[serde(rename = "authorizationServerId", default, skip_serializing_if = "Option::is_none")]
    pub authorization_server_id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub scope: Option<String>,
}
/// API OpenID Connect authentication settings: the OpenID provider to use
/// and how bearer tokens may be sent to the API.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OpenIdAuthenticationSettingsContract {
    #[serde(rename = "openidProviderId", default, skip_serializing_if = "Option::is_none")]
    pub openid_provider_id: Option<String>,
    #[serde(rename = "bearerTokenSendingMethods", default, skip_serializing_if = "Vec::is_empty")]
    pub bearer_token_sending_methods: Vec<BearerTokenSendingMethodsContract>,
}
/// Paged list of OpenID Connect providers (`nextLink` = continuation URL).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OpenIdConnectProviderCollection {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<OpenidConnectProviderContract>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub count: Option<i64>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// OpenID Connect provider entity wrapping the ARM resource envelope.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OpenidConnectProviderContract {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<OpenidConnectProviderContractProperties>,
}
/// Full (create/read) properties of an OpenID Connect provider; display
/// name, metadata endpoint and client id are required on the wire.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OpenidConnectProviderContractProperties {
    #[serde(rename = "displayName")]
    pub display_name: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(rename = "metadataEndpoint")]
    pub metadata_endpoint: String,
    #[serde(rename = "clientId")]
    pub client_id: String,
    /// Client secret; optional here (service responses typically omit
    /// secrets — confirm against the API version's behavior).
    #[serde(rename = "clientSecret", default, skip_serializing_if = "Option::is_none")]
    pub client_secret: Option<String>,
}
/// PATCH body for updating an OpenID Connect provider.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OpenidConnectProviderUpdateContract {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<OpenidConnectProviderUpdateContractProperties>,
}
/// Update-variant of the provider properties: all fields optional so a
/// PATCH can change any subset.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OpenidConnectProviderUpdateContractProperties {
    #[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
    pub display_name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(rename = "metadataEndpoint", default, skip_serializing_if = "Option::is_none")]
    pub metadata_endpoint: Option<String>,
    #[serde(rename = "clientId", default, skip_serializing_if = "Option::is_none")]
    pub client_id: Option<String>,
    #[serde(rename = "clientSecret", default, skip_serializing_if = "Option::is_none")]
    pub client_secret: Option<String>,
}
/// Paged list of API Operations (`nextLink` = continuation URL).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationCollection {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<OperationContract>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub count: Option<i64>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// API Operation entity wrapping the ARM resource envelope.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationContract {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<OperationContractProperties>,
}
/// Full properties of an Operation: the shared base (flattened) plus the
/// required display name, HTTP method and URL template.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationContractProperties {
    #[serde(flatten)]
    pub operation_entity_base_contract: OperationEntityBaseContract,
    #[serde(rename = "displayName")]
    pub display_name: String,
    pub method: String,
    /// URL template for the operation, e.g. a path that may contain
    /// `{parameter}` placeholders described by `template_parameters`.
    #[serde(rename = "urlTemplate")]
    pub url_template: String,
}
/// Fields shared by the full and update variants of an Operation:
/// template parameters, request/response descriptions and raw policy text.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationEntityBaseContract {
    #[serde(rename = "templateParameters", default, skip_serializing_if = "Vec::is_empty")]
    pub template_parameters: Vec<ParameterContract>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub request: Option<RequestContract>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub responses: Vec<ResponseContract>,
    /// Operation policies as a raw string (presumably policy XML — confirm
    /// against the service documentation).
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub policies: Option<String>,
}
/// Result of a long-running operation, wrapped in the ARM resource envelope.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationResultContract {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<OperationResultContractProperties>,
}
/// Status/progress details of a long-running operation: lifecycle status,
/// timestamps, result info, error (if failed) and a per-object action log.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationResultContractProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<operation_result_contract_properties::Status>,
    /// Start timestamp, transported as a string (format not constrained
    /// here — presumably ISO 8601, confirm against the spec).
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub started: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub updated: Option<String>,
    #[serde(rename = "resultInfo", default, skip_serializing_if = "Option::is_none")]
    pub result_info: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub error: Option<ErrorResponseBody>,
    #[serde(rename = "actionLog", default, skip_serializing_if = "Vec::is_empty")]
    pub action_log: Vec<OperationResultLogItemContract>,
}
/// Companion module for [`OperationResultContractProperties`]: lifecycle
/// states of a long-running operation. Note: variants serialize with their
/// PascalCase Rust names (no `rename` attributes).
pub mod operation_result_contract_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Status {
        Started,
        InProgress,
        Succeeded,
        Failed,
    }
}
/// One entry of a long-running operation's action log: which object of
/// which type had which action applied.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationResultLogItemContract {
    #[serde(rename = "objectType", default, skip_serializing_if = "Option::is_none")]
    pub object_type: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub action: Option<String>,
    #[serde(rename = "objectKey", default, skip_serializing_if = "Option::is_none")]
    pub object_key: Option<String>,
}
/// Operation details as they appear in a tag-resource listing: identifies
/// the operation together with its owning API (name/revision/version).
/// All fields optional; camelCase wire names via `rename`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationTagResourceContractProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "apiName", default, skip_serializing_if = "Option::is_none")]
    pub api_name: Option<String>,
    #[serde(rename = "apiRevision", default, skip_serializing_if = "Option::is_none")]
    pub api_revision: Option<String>,
    #[serde(rename = "apiVersion", default, skip_serializing_if = "Option::is_none")]
    pub api_version: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub method: Option<String>,
    #[serde(rename = "urlTemplate", default, skip_serializing_if = "Option::is_none")]
    pub url_template: Option<String>,
}
/// PATCH body for updating an API Operation.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationUpdateContract {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<OperationUpdateContractProperties>,
}
/// Update-variant of Operation properties: same shape as
/// `OperationContractProperties` but with `displayName`/`method`/
/// `urlTemplate` made optional so a PATCH can change any subset.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationUpdateContractProperties {
    #[serde(flatten)]
    pub operation_entity_base_contract: OperationEntityBaseContract,
    #[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
    pub display_name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub method: Option<String>,
    #[serde(rename = "urlTemplate", default, skip_serializing_if = "Option::is_none")]
    pub url_template: Option<String>,
}
/// Describes one operation parameter (template/query/header/form):
/// name and type are required; constraints, schema link and examples
/// are optional.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ParameterContract {
    pub name: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    /// Parameter type; `type` is a Rust keyword, hence the `type_` name
    /// with a serde rename back to `type` on the wire.
    #[serde(rename = "type")]
    pub type_: String,
    #[serde(rename = "defaultValue", default, skip_serializing_if = "Option::is_none")]
    pub default_value: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub required: Option<bool>,
    /// Allowed values for the parameter, when the spec enumerates them.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub values: Vec<String>,
    #[serde(rename = "schemaId", default, skip_serializing_if = "Option::is_none")]
    pub schema_id: Option<String>,
    #[serde(rename = "typeName", default, skip_serializing_if = "Option::is_none")]
    pub type_name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub examples: Option<ParameterExamplesContract>,
}
/// Marker struct for a parameter's examples map. Generated with no fields
/// (the spec models it as a free-form object keyed by example name).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ParameterExamplesContract {}
/// A single named example for a parameter: summary/description plus either
/// an inline `value` or an `externalValue` URL.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ParameterExampleContract {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub summary: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub value: Option<serde_json::Value>,
    #[serde(rename = "externalValue", default, skip_serializing_if = "Option::is_none")]
    pub external_value: Option<String>,
}
/// Diagnostic settings for one direction of the gateway pipeline: what to
/// capture from the request and from the response.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PipelineDiagnosticSettings {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub request: Option<HttpMessageDiagnostic>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub response: Option<HttpMessageDiagnostic>,
}
/// Paged list of Policy entities (`nextLink` = continuation URL).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PolicyCollection {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<PolicyContract>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub count: Option<i64>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Policy entity wrapping the ARM resource envelope.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PolicyContract {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<PolicyContractProperties>,
}
/// Policy payload: the required policy content (`value`) and an optional
/// `format` telling how `value` is encoded (inline XML vs. a link).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PolicyContractProperties {
    pub value: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub format: Option<policy_contract_properties::Format>,
}
/// Companion module for [`PolicyContractProperties`]: wire encodings of the
/// policy document (`xml`/`rawxml` inline, `*-link` = URL to the document).
pub mod policy_contract_properties {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Format {
        #[serde(rename = "xml")]
        Xml,
        #[serde(rename = "xml-link")]
        XmlLink,
        #[serde(rename = "rawxml")]
        Rawxml,
        #[serde(rename = "rawxml-link")]
        RawxmlLink,
    }
}
/// Policy description entity wrapping the ARM resource envelope.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PolicyDescriptionContract {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<PolicyDescriptionContractProperties>,
}
/// Description of an available policy: human-readable text plus a numeric
/// `scope` (bit flags per the service spec — confirm the encoding there).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PolicyDescriptionContractProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub scope: Option<i64>,
}
/// Unpaged list of policy descriptions (no `nextLink`, unlike the other
/// collection types in this module).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PolicyDescriptionCollection {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<PolicyDescriptionContract>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub count: Option<i64>,
}
/// Developer-portal delegation settings entity (ARM envelope + payload).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PortalDelegationSettings {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<PortalDelegationSettingsProperties>,
}
/// Delegation settings payload: the delegation endpoint URL, the shared
/// validation key, and per-feature toggles for delegating subscription
/// management and user registration.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PortalDelegationSettingsProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub url: Option<String>,
    #[serde(rename = "validationKey", default, skip_serializing_if = "Option::is_none")]
    pub validation_key: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub subscriptions: Option<SubscriptionsDelegationSettingsProperties>,
    #[serde(rename = "userRegistration", default, skip_serializing_if = "Option::is_none")]
    pub user_registration: Option<RegistrationDelegationSettingsProperties>,
}
/// Unpaged list of portal settings entities.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PortalSettingsCollection {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<PortalSettingsContract>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub count: Option<i64>,
}
/// Portal settings entity wrapping the ARM resource envelope.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PortalSettingsContract {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<PortalSettingsContractProperties>,
}
/// Union of all portal settings fields (delegation, signin `enabled`,
/// signup terms-of-service) — which subset applies depends on which
/// settings entity this payload belongs to.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PortalSettingsContractProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub url: Option<String>,
    #[serde(rename = "validationKey", default, skip_serializing_if = "Option::is_none")]
    pub validation_key: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub subscriptions: Option<SubscriptionsDelegationSettingsProperties>,
    #[serde(rename = "userRegistration", default, skip_serializing_if = "Option::is_none")]
    pub user_registration: Option<RegistrationDelegationSettingsProperties>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub enabled: Option<bool>,
    #[serde(rename = "termsOfService", default, skip_serializing_if = "Option::is_none")]
    pub terms_of_service: Option<TermsOfServiceProperties>,
}
/// Sign-in settings payload: a single toggle (presumably "redirect
/// anonymous users to sign in" — confirm against the service docs).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PortalSigninSettingProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub enabled: Option<bool>,
}
/// Developer-portal sign-in settings entity (ARM envelope + payload).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PortalSigninSettings {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<PortalSigninSettingProperties>,
}
/// Developer-portal sign-up settings entity (ARM envelope + payload).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PortalSignupSettings {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<PortalSignupSettingsProperties>,
}
/// Sign-up settings payload: enable flag plus terms-of-service
/// configuration shown during registration.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PortalSignupSettingsProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub enabled: Option<bool>,
    #[serde(rename = "termsOfService", default, skip_serializing_if = "Option::is_none")]
    pub terms_of_service: Option<TermsOfServiceProperties>,
}
/// Paged list of Products (`nextLink` = continuation URL).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ProductCollection {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<ProductContract>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub count: Option<i64>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Product entity wrapping the ARM resource envelope.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ProductContract {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ProductContractProperties>,
}
/// Full Product properties: shared base (flattened) + required display name.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ProductContractProperties {
    #[serde(flatten)]
    pub product_entity_base_parameters: ProductEntityBaseParameters,
    #[serde(rename = "displayName")]
    pub display_name: String,
}
/// Fields shared by all Product variants: description, legal terms,
/// subscription/approval requirements, per-user subscription cap and
/// published state.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ProductEntityBaseParameters {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub terms: Option<String>,
    #[serde(rename = "subscriptionRequired", default, skip_serializing_if = "Option::is_none")]
    pub subscription_required: Option<bool>,
    #[serde(rename = "approvalRequired", default, skip_serializing_if = "Option::is_none")]
    pub approval_required: Option<bool>,
    #[serde(rename = "subscriptionsLimit", default, skip_serializing_if = "Option::is_none")]
    pub subscriptions_limit: Option<i32>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub state: Option<product_entity_base_parameters::State>,
}
/// Companion module for [`ProductEntityBaseParameters`]: publication state
/// of a Product, with camelCase wire strings.
pub mod product_entity_base_parameters {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum State {
        #[serde(rename = "notPublished")]
        NotPublished,
        #[serde(rename = "published")]
        Published,
    }
}
/// Product details as they appear in a tag-resource listing; here `name`
/// is required while `id` is optional (inverse of the usual envelope).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ProductTagResourceContractProperties {
    #[serde(flatten)]
    pub product_entity_base_parameters: ProductEntityBaseParameters,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    pub name: String,
}
/// PATCH body for updating a Product.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ProductUpdateParameters {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ProductUpdateProperties>,
}
/// Update-variant of Product properties: same base, but `displayName`
/// is optional so a PATCH can change any subset.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ProductUpdateProperties {
    #[serde(flatten)]
    pub product_entity_base_parameters: ProductEntityBaseParameters,
    #[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
    pub display_name: Option<String>,
}
/// Paged list of Named Values (`nextLink` = continuation URL).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NamedValueCollection {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<NamedValueContract>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub count: Option<i64>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Create/PUT body for a Named Value (ARM envelope + create payload).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NamedValueCreateContract {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<NamedValueCreateContractProperties>,
}
/// Create payload for a Named Value: display name is required; the value
/// may be given inline or sourced from Key Vault (create-variant of the
/// Key Vault reference type).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NamedValueCreateContractProperties {
    #[serde(flatten)]
    pub named_value_entity_base_parameters: NamedValueEntityBaseParameters,
    #[serde(rename = "displayName")]
    pub display_name: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub value: Option<String>,
    #[serde(rename = "keyVault", default, skip_serializing_if = "Option::is_none")]
    pub key_vault: Option<KeyVaultContractCreateProperties>,
}
/// Named Value entity as returned by the service (ARM envelope + payload).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NamedValueContract {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<NamedValueContractProperties>,
}
/// Read payload for a Named Value; differs from the create variant only in
/// the Key Vault reference type (`KeyVaultContractProperties`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NamedValueContractProperties {
    #[serde(flatten)]
    pub named_value_entity_base_parameters: NamedValueEntityBaseParameters,
    #[serde(rename = "displayName")]
    pub display_name: String,
    /// Inline value; may be absent (e.g. for secrets or Key Vault-backed
    /// values — confirm exact omission rules against the service).
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub value: Option<String>,
    #[serde(rename = "keyVault", default, skip_serializing_if = "Option::is_none")]
    pub key_vault: Option<KeyVaultContractProperties>,
}
/// Fields shared by all Named Value variants: free-form tags and whether
/// the value is a secret.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NamedValueEntityBaseParameters {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub tags: Vec<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub secret: Option<bool>,
}
/// Update-variant of Named Value properties: all fields optional so a
/// PATCH can change any subset.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NamedValueUpdateParameterProperties {
    #[serde(flatten)]
    pub named_value_entity_base_parameters: NamedValueEntityBaseParameters,
    #[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
    pub display_name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub value: Option<String>,
    #[serde(rename = "keyVault", default, skip_serializing_if = "Option::is_none")]
    pub key_vault: Option<KeyVaultContractCreateProperties>,
}
/// PATCH body for updating a Named Value.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NamedValueUpdateParameters {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<NamedValueUpdateParameterProperties>,
}
/// Paged list of quota counters (`nextLink` = continuation URL).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QuotaCounterCollection {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<QuotaCounterContract>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub count: Option<i64>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// One quota counter: identified by counter key + period key, bounded by
/// the period's start/end timestamps, with the accumulated usage in `value`.
/// Note this is a plain payload, not an ARM `Resource` wrapper.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QuotaCounterContract {
    #[serde(rename = "counterKey")]
    pub counter_key: String,
    #[serde(rename = "periodKey")]
    pub period_key: String,
    /// Period boundaries as strings (presumably ISO 8601 — confirm).
    #[serde(rename = "periodStartTime")]
    pub period_start_time: String,
    #[serde(rename = "periodEndTime")]
    pub period_end_time: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub value: Option<QuotaCounterValueContractProperties>,
}
/// Wrapper carrying just the counter value details.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QuotaCounterValueContract {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub value: Option<QuotaCounterValueContractProperties>,
}
/// Quota usage figures: number of calls and data transferred in KB.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QuotaCounterValueContractProperties {
    #[serde(rename = "callsCount", default, skip_serializing_if = "Option::is_none")]
    pub calls_count: Option<i32>,
    #[serde(rename = "kbTransferred", default, skip_serializing_if = "Option::is_none")]
    pub kb_transferred: Option<f64>,
}
/// PATCH body for overwriting a quota counter's current value.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct QuotaCounterValueUpdateContract {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<QuotaCounterValueContractProperties>,
}
/// Paged list of notification recipient e-mails (`nextLink` = continuation).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RecipientEmailCollection {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<RecipientEmailContract>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub count: Option<i64>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Recipient-email entity wrapping the ARM resource envelope.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RecipientEmailContract {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<RecipientEmailContractProperties>,
}
/// Payload of a recipient e-mail: just the address.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RecipientEmailContractProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub email: Option<String>,
}
/// Combined recipient lists embedded in a notification: raw e-mail
/// addresses and user identifiers.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RecipientsContractProperties {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub emails: Vec<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub users: Vec<String>,
}
/// Paged list of notification recipient users (`nextLink` = continuation).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RecipientUserCollection {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<RecipientUserContract>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub count: Option<i64>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Recipient-user entity wrapping the ARM resource envelope.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RecipientUserContract {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<RecipientUsersContractProperties>,
}
/// Payload of a recipient user: the user's identifier (wire: `userId`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RecipientUsersContractProperties {
    #[serde(rename = "userId", default, skip_serializing_if = "Option::is_none")]
    pub user_id: Option<String>,
}
/// Toggle for delegating user registration to an external endpoint
/// (used inside the portal delegation settings payloads).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RegistrationDelegationSettingsProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub enabled: Option<bool>,
}
/// Paged list of analytics report records (`nextLink` = continuation URL).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ReportCollection {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<ReportRecordContract>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub count: Option<i64>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// One aggregated analytics record. The first group of fields identifies
/// the aggregation dimension (time/geo/user/product/API/operation/
/// subscription); the rest are metrics: call counts by outcome, bandwidth,
/// cache hits/misses, and API/backend latency aggregates. Which dimension
/// fields are populated depends on the report requested.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ReportRecordContract {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub timestamp: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub interval: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub country: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub region: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub zip: Option<String>,
    #[serde(rename = "userId", default, skip_serializing_if = "Option::is_none")]
    pub user_id: Option<String>,
    #[serde(rename = "productId", default, skip_serializing_if = "Option::is_none")]
    pub product_id: Option<String>,
    #[serde(rename = "apiId", default, skip_serializing_if = "Option::is_none")]
    pub api_id: Option<String>,
    #[serde(rename = "operationId", default, skip_serializing_if = "Option::is_none")]
    pub operation_id: Option<String>,
    #[serde(rename = "apiRegion", default, skip_serializing_if = "Option::is_none")]
    pub api_region: Option<String>,
    #[serde(rename = "subscriptionId", default, skip_serializing_if = "Option::is_none")]
    pub subscription_id: Option<String>,
    #[serde(rename = "callCountSuccess", default, skip_serializing_if = "Option::is_none")]
    pub call_count_success: Option<i32>,
    #[serde(rename = "callCountBlocked", default, skip_serializing_if = "Option::is_none")]
    pub call_count_blocked: Option<i32>,
    #[serde(rename = "callCountFailed", default, skip_serializing_if = "Option::is_none")]
    pub call_count_failed: Option<i32>,
    #[serde(rename = "callCountOther", default, skip_serializing_if = "Option::is_none")]
    pub call_count_other: Option<i32>,
    #[serde(rename = "callCountTotal", default, skip_serializing_if = "Option::is_none")]
    pub call_count_total: Option<i32>,
    /// Bandwidth consumed (units not encoded here — presumably bytes,
    /// confirm against the service documentation).
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub bandwidth: Option<i64>,
    #[serde(rename = "cacheHitCount", default, skip_serializing_if = "Option::is_none")]
    pub cache_hit_count: Option<i32>,
    #[serde(rename = "cacheMissCount", default, skip_serializing_if = "Option::is_none")]
    pub cache_miss_count: Option<i32>,
    #[serde(rename = "apiTimeAvg", default, skip_serializing_if = "Option::is_none")]
    pub api_time_avg: Option<f64>,
    #[serde(rename = "apiTimeMin", default, skip_serializing_if = "Option::is_none")]
    pub api_time_min: Option<f64>,
    #[serde(rename = "apiTimeMax", default, skip_serializing_if = "Option::is_none")]
    pub api_time_max: Option<f64>,
    #[serde(rename = "serviceTimeAvg", default, skip_serializing_if = "Option::is_none")]
    pub service_time_avg: Option<f64>,
    #[serde(rename = "serviceTimeMin", default, skip_serializing_if = "Option::is_none")]
    pub service_time_min: Option<f64>,
    #[serde(rename = "serviceTimeMax", default, skip_serializing_if = "Option::is_none")]
    pub service_time_max: Option<f64>,
}
/// One body representation of a request or response: required content
/// type, optional schema reference, form parameters and examples.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RepresentationContract {
    #[serde(rename = "contentType")]
    pub content_type: String,
    #[serde(rename = "schemaId", default, skip_serializing_if = "Option::is_none")]
    pub schema_id: Option<String>,
    #[serde(rename = "typeName", default, skip_serializing_if = "Option::is_none")]
    pub type_name: Option<String>,
    /// Form fields, used for form-encoded content types.
    #[serde(rename = "formParameters", default, skip_serializing_if = "Vec::is_empty")]
    pub form_parameters: Vec<ParameterContract>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub examples: Option<ParameterExamplesContract>,
}
/// Description of an operation's request: query parameters, headers and
/// accepted body representations. All fields optional/defaultable.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RequestContract {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(rename = "queryParameters", default, skip_serializing_if = "Vec::is_empty")]
    pub query_parameters: Vec<ParameterContract>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub headers: Vec<ParameterContract>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub representations: Vec<RepresentationContract>,
}
/// Unpaged list of per-request report records (no `nextLink` field).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RequestReportCollection {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<RequestReportRecordContract>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub count: Option<i64>,
}
/// One individual gateway request record: who called what (API/operation/
/// product/user/subscription), how (method, URL, IP), the outcome (response
/// codes/sizes, cache status) and latency split between total API time and
/// backend service time.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RequestReportRecordContract {
    #[serde(rename = "apiId", default, skip_serializing_if = "Option::is_none")]
    pub api_id: Option<String>,
    #[serde(rename = "operationId", default, skip_serializing_if = "Option::is_none")]
    pub operation_id: Option<String>,
    #[serde(rename = "productId", default, skip_serializing_if = "Option::is_none")]
    pub product_id: Option<String>,
    #[serde(rename = "userId", default, skip_serializing_if = "Option::is_none")]
    pub user_id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub method: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub url: Option<String>,
    #[serde(rename = "ipAddress", default, skip_serializing_if = "Option::is_none")]
    pub ip_address: Option<String>,
    /// Backend response code kept as a string, unlike the numeric gateway
    /// `responseCode` below — mirrors the wire schema.
    #[serde(rename = "backendResponseCode", default, skip_serializing_if = "Option::is_none")]
    pub backend_response_code: Option<String>,
    #[serde(rename = "responseCode", default, skip_serializing_if = "Option::is_none")]
    pub response_code: Option<i32>,
    #[serde(rename = "responseSize", default, skip_serializing_if = "Option::is_none")]
    pub response_size: Option<i32>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub timestamp: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub cache: Option<String>,
    #[serde(rename = "apiTime", default, skip_serializing_if = "Option::is_none")]
    pub api_time: Option<f64>,
    #[serde(rename = "serviceTime", default, skip_serializing_if = "Option::is_none")]
    pub service_time: Option<f64>,
    #[serde(rename = "apiRegion", default, skip_serializing_if = "Option::is_none")]
    pub api_region: Option<String>,
    #[serde(rename = "subscriptionId", default, skip_serializing_if = "Option::is_none")]
    pub subscription_id: Option<String>,
    #[serde(rename = "requestId", default, skip_serializing_if = "Option::is_none")]
    pub request_id: Option<String>,
    #[serde(rename = "requestSize", default, skip_serializing_if = "Option::is_none")]
    pub request_size: Option<i32>,
}
/// Description of one possible operation response, keyed by the required
/// HTTP `statusCode`, with optional description, body representations
/// and headers.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResponseContract {
    #[serde(rename = "statusCode")]
    pub status_code: i32,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub representations: Vec<RepresentationContract>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub headers: Vec<ParameterContract>,
}
/// Diagnostic sampling configuration: sampling strategy plus the
/// percentage of requests to sample.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SamplingSettings {
    #[serde(rename = "samplingType", default, skip_serializing_if = "Option::is_none")]
    pub sampling_type: Option<sampling_settings::SamplingType>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub percentage: Option<f64>,
}
/// Companion module for [`SamplingSettings`]; `fixed` is the only sampling
/// strategy this API version defines.
pub mod sampling_settings {
    use super::*;
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum SamplingType {
        #[serde(rename = "fixed")]
        Fixed,
    }
}
/// Request wrapper for the save-configuration operation.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SaveConfigurationParameter {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<SaveConfigurationParameterProperties>,
}
/// Parameters for saving the configuration to a named branch.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SaveConfigurationParameterProperties {
    /// Name of the configuration branch to save to (required).
    pub branch: String,
    /// NOTE(review): exact force semantics are not visible here — confirm
    /// against the service documentation.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub force: Option<bool>,
}
/// Page of [`SchemaContract`] values with an optional total count and
/// continuation link (`nextLink`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SchemaCollection {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<SchemaContract>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub count: Option<i64>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Schema resource: common [`Resource`] fields flattened into this object
/// plus optional schema-specific properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SchemaContract {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<SchemaContractProperties>,
}
/// Properties of a schema: its content type and the schema document itself.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SchemaContractProperties {
    /// Media type describing how `document` should be interpreted (required).
    #[serde(rename = "contentType")]
    pub content_type: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub document: Option<SchemaDocumentProperties>,
}
/// Schema document payload: either an inline string `value` or structured
/// `definitions`/`components` carried as untyped JSON.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SchemaDocumentProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub value: Option<String>,
    /// Arbitrary JSON; shape depends on the schema format.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub definitions: Option<serde_json::Value>,
    /// Arbitrary JSON; shape depends on the schema format.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub components: Option<serde_json::Value>,
}
/// Page of [`SubscriptionContract`] values with an optional total count and
/// continuation link (`nextLink`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SubscriptionCollection {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<SubscriptionContract>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub count: Option<i64>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Subscription resource: common [`Resource`] fields flattened into this
/// object plus optional subscription-specific properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SubscriptionContract {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<SubscriptionContractProperties>,
}
/// Details of a subscription: owner, scope, lifecycle dates, keys and state.
/// All date fields are carried as raw strings; the format is not enforced by
/// this type.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SubscriptionContractProperties {
    #[serde(rename = "ownerId", default, skip_serializing_if = "Option::is_none")]
    pub owner_id: Option<String>,
    /// What the subscription applies to (required).
    pub scope: String,
    #[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
    pub display_name: Option<String>,
    /// Current lifecycle state (required).
    pub state: subscription_contract_properties::State,
    #[serde(rename = "createdDate", default, skip_serializing_if = "Option::is_none")]
    pub created_date: Option<String>,
    #[serde(rename = "startDate", default, skip_serializing_if = "Option::is_none")]
    pub start_date: Option<String>,
    #[serde(rename = "expirationDate", default, skip_serializing_if = "Option::is_none")]
    pub expiration_date: Option<String>,
    #[serde(rename = "endDate", default, skip_serializing_if = "Option::is_none")]
    pub end_date: Option<String>,
    #[serde(rename = "notificationDate", default, skip_serializing_if = "Option::is_none")]
    pub notification_date: Option<String>,
    /// Secret material — avoid logging this field.
    #[serde(rename = "primaryKey", default, skip_serializing_if = "Option::is_none")]
    pub primary_key: Option<String>,
    /// Secret material — avoid logging this field.
    #[serde(rename = "secondaryKey", default, skip_serializing_if = "Option::is_none")]
    pub secondary_key: Option<String>,
    #[serde(rename = "stateComment", default, skip_serializing_if = "Option::is_none")]
    pub state_comment: Option<String>,
    #[serde(rename = "allowTracing", default, skip_serializing_if = "Option::is_none")]
    pub allow_tracing: Option<bool>,
}
/// Nested enums for [`SubscriptionContractProperties`].
pub mod subscription_contract_properties {
    use super::*;
    /// Lifecycle state of a subscription.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum State {
        #[serde(rename = "suspended")]
        Suspended,
        #[serde(rename = "active")]
        Active,
        #[serde(rename = "expired")]
        Expired,
        #[serde(rename = "submitted")]
        Submitted,
        #[serde(rename = "rejected")]
        Rejected,
        #[serde(rename = "cancelled")]
        Cancelled,
    }
}
/// Properties for creating a subscription. Unlike
/// [`SubscriptionContractProperties`], `display_name` is required here.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SubscriptionCreateParameterProperties {
    #[serde(rename = "ownerId", default, skip_serializing_if = "Option::is_none")]
    pub owner_id: Option<String>,
    /// What the subscription applies to (required).
    pub scope: String,
    #[serde(rename = "displayName")]
    pub display_name: String,
    /// Secret material — avoid logging this field.
    #[serde(rename = "primaryKey", default, skip_serializing_if = "Option::is_none")]
    pub primary_key: Option<String>,
    /// Secret material — avoid logging this field.
    #[serde(rename = "secondaryKey", default, skip_serializing_if = "Option::is_none")]
    pub secondary_key: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub state: Option<subscription_create_parameter_properties::State>,
    #[serde(rename = "allowTracing", default, skip_serializing_if = "Option::is_none")]
    pub allow_tracing: Option<bool>,
}
/// Nested enums for [`SubscriptionCreateParameterProperties`].
pub mod subscription_create_parameter_properties {
    use super::*;
    /// Lifecycle state of a subscription (same wire values as
    /// `subscription_contract_properties::State`).
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum State {
        #[serde(rename = "suspended")]
        Suspended,
        #[serde(rename = "active")]
        Active,
        #[serde(rename = "expired")]
        Expired,
        #[serde(rename = "submitted")]
        Submitted,
        #[serde(rename = "rejected")]
        Rejected,
        #[serde(rename = "cancelled")]
        Cancelled,
    }
}
/// Request wrapper for creating a subscription.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SubscriptionCreateParameters {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<SubscriptionCreateParameterProperties>,
}
/// Names under which subscription keys may be passed: an HTTP header name
/// and/or a query-string parameter name.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SubscriptionKeyParameterNamesContract {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub header: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub query: Option<String>,
}
/// Delegation settings toggle for subscriptions.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SubscriptionsDelegationSettingsProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub enabled: Option<bool>,
}
/// Properties for updating a subscription; every field is optional so only
/// the supplied values are changed.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SubscriptionUpdateParameterProperties {
    #[serde(rename = "ownerId", default, skip_serializing_if = "Option::is_none")]
    pub owner_id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub scope: Option<String>,
    /// Date carried as a raw string; format not enforced by this type.
    #[serde(rename = "expirationDate", default, skip_serializing_if = "Option::is_none")]
    pub expiration_date: Option<String>,
    #[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
    pub display_name: Option<String>,
    /// Secret material — avoid logging this field.
    #[serde(rename = "primaryKey", default, skip_serializing_if = "Option::is_none")]
    pub primary_key: Option<String>,
    /// Secret material — avoid logging this field.
    #[serde(rename = "secondaryKey", default, skip_serializing_if = "Option::is_none")]
    pub secondary_key: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub state: Option<subscription_update_parameter_properties::State>,
    #[serde(rename = "stateComment", default, skip_serializing_if = "Option::is_none")]
    pub state_comment: Option<String>,
    #[serde(rename = "allowTracing", default, skip_serializing_if = "Option::is_none")]
    pub allow_tracing: Option<bool>,
}
/// Nested enums for [`SubscriptionUpdateParameterProperties`].
pub mod subscription_update_parameter_properties {
    use super::*;
    /// Lifecycle state of a subscription (same wire values as
    /// `subscription_contract_properties::State`).
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum State {
        #[serde(rename = "suspended")]
        Suspended,
        #[serde(rename = "active")]
        Active,
        #[serde(rename = "expired")]
        Expired,
        #[serde(rename = "submitted")]
        Submitted,
        #[serde(rename = "rejected")]
        Rejected,
        #[serde(rename = "cancelled")]
        Cancelled,
    }
}
/// Request wrapper for updating a subscription.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SubscriptionUpdateParameters {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<SubscriptionUpdateParameterProperties>,
}
/// Page of [`TagContract`] values with an optional total count and
/// continuation link (`nextLink`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TagCollection {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<TagContract>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub count: Option<i64>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Tag resource: common [`Resource`] fields flattened into this object plus
/// optional tag-specific properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TagContract {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<TagContractProperties>,
}
/// Properties of a tag: just its display name (required).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TagContractProperties {
    #[serde(rename = "displayName")]
    pub display_name: String,
}
/// Request wrapper for creating or updating a tag.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TagCreateUpdateParameters {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<TagContractProperties>,
}
/// Base fields shared by tag-description types: free-text description plus
/// an optional external-documentation URL and its description.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TagDescriptionBaseProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(rename = "externalDocsUrl", default, skip_serializing_if = "Option::is_none")]
    pub external_docs_url: Option<String>,
    #[serde(rename = "externalDocsDescription", default, skip_serializing_if = "Option::is_none")]
    pub external_docs_description: Option<String>,
}
/// Page of [`TagDescriptionContract`] values with an optional total count
/// and continuation link (`nextLink`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TagDescriptionCollection {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<TagDescriptionContract>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub count: Option<i64>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Tag-description resource: common [`Resource`] fields flattened into this
/// object plus optional tag-description properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TagDescriptionContract {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<TagDescriptionContractProperties>,
}
/// Tag-description properties: the base fields flattened in, plus the
/// identifier and display name of the tag being described.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TagDescriptionContractProperties {
    #[serde(flatten)]
    pub tag_description_base_properties: TagDescriptionBaseProperties,
    #[serde(rename = "tagId", default, skip_serializing_if = "Option::is_none")]
    pub tag_id: Option<String>,
    #[serde(rename = "displayName", default, skip_serializing_if = "Option::is_none")]
    pub display_name: Option<String>,
}
/// Request wrapper for creating a tag description.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TagDescriptionCreateParameters {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<TagDescriptionBaseProperties>,
}
/// Page of [`TagResourceContract`] values with an optional total count and
/// continuation link (`nextLink`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TagResourceCollection {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<TagResourceContract>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub count: Option<i64>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// A tag together with the entity it is attached to: exactly which of
/// `api`/`operation`/`product` is populated depends on the association.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TagResourceContract {
    /// The tag itself (required).
    pub tag: TagTagResourceContractProperties,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub api: Option<ApiTagResourceContractProperties>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub operation: Option<OperationTagResourceContractProperties>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub product: Option<ProductTagResourceContractProperties>,
}
/// Minimal tag reference used inside [`TagResourceContract`]: id and name.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TagTagResourceContractProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
}
/// Tenant configuration sync-state resource: common [`Resource`] fields
/// flattened into this object plus optional sync-state properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TenantConfigurationSyncStateContract {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<TenantConfigurationSyncStateContractProperties>,
}
/// State of synchronization between the tenant configuration and its
/// configuration branch: branch/commit, sync flags and timestamps.
/// Dates are carried as raw strings; the format is not enforced here.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TenantConfigurationSyncStateContractProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub branch: Option<String>,
    #[serde(rename = "commitId", default, skip_serializing_if = "Option::is_none")]
    pub commit_id: Option<String>,
    #[serde(rename = "isExport", default, skip_serializing_if = "Option::is_none")]
    pub is_export: Option<bool>,
    #[serde(rename = "isSynced", default, skip_serializing_if = "Option::is_none")]
    pub is_synced: Option<bool>,
    #[serde(rename = "isGitEnabled", default, skip_serializing_if = "Option::is_none")]
    pub is_git_enabled: Option<bool>,
    #[serde(rename = "syncDate", default, skip_serializing_if = "Option::is_none")]
    pub sync_date: Option<String>,
    #[serde(rename = "configurationChangeDate", default, skip_serializing_if = "Option::is_none")]
    pub configuration_change_date: Option<String>,
    #[serde(rename = "lastOperationId", default, skip_serializing_if = "Option::is_none")]
    pub last_operation_id: Option<String>,
}
/// Terms-of-service settings: the text itself, whether it is shown, and
/// whether explicit consent is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TermsOfServiceProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub text: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub enabled: Option<bool>,
    #[serde(rename = "consentRequired", default, skip_serializing_if = "Option::is_none")]
    pub consent_required: Option<bool>,
}
/// A single name/value pair sent in a token request body; both fields are
/// required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct TokenBodyParameterContract {
    pub name: String,
    pub value: String,
}
/// Page of [`UserContract`] values with an optional total count and
/// continuation link (`nextLink`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UserCollection {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<UserContract>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub count: Option<i64>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// User resource: common [`Resource`] fields flattened into this object
/// plus optional user-specific properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UserContract {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<UserContractProperties>,
}
/// User details: the shared base fields (state, note, identities) flattened
/// in, plus name, email, registration date and group memberships.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UserContractProperties {
    #[serde(flatten)]
    pub user_entity_base_parameters: UserEntityBaseParameters,
    #[serde(rename = "firstName", default, skip_serializing_if = "Option::is_none")]
    pub first_name: Option<String>,
    #[serde(rename = "lastName", default, skip_serializing_if = "Option::is_none")]
    pub last_name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub email: Option<String>,
    /// Date carried as a raw string; format not enforced by this type.
    #[serde(rename = "registrationDate", default, skip_serializing_if = "Option::is_none")]
    pub registration_date: Option<String>,
    /// Groups this user belongs to; omitted from JSON when empty.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub groups: Vec<GroupContractProperties>,
}
/// Properties for creating a user. Unlike [`UserContractProperties`],
/// `email`, `first_name` and `last_name` are required here.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UserCreateParameterProperties {
    #[serde(flatten)]
    pub user_entity_base_parameters: UserEntityBaseParameters,
    pub email: String,
    #[serde(rename = "firstName")]
    pub first_name: String,
    #[serde(rename = "lastName")]
    pub last_name: String,
    /// Secret material — avoid logging this field.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<String>,
    #[serde(rename = "appType", default, skip_serializing_if = "Option::is_none")]
    pub app_type: Option<user_create_parameter_properties::AppType>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub confirmation: Option<user_create_parameter_properties::Confirmation>,
}
/// Nested enums for [`UserCreateParameterProperties`].
pub mod user_create_parameter_properties {
    use super::*;
    /// Which portal the creation request originates from.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum AppType {
        #[serde(rename = "portal")]
        Portal,
        #[serde(rename = "developerPortal")]
        DeveloperPortal,
    }
    /// How the new account is confirmed.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Confirmation {
        #[serde(rename = "signup")]
        Signup,
        #[serde(rename = "invite")]
        Invite,
    }
}
/// Request wrapper for creating a user.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UserCreateParameters {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<UserCreateParameterProperties>,
}
/// Base fields shared by user create/update/read types: account state, an
/// administrative note, and external identities.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UserEntityBaseParameters {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub state: Option<user_entity_base_parameters::State>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub note: Option<String>,
    /// External identities linked to this user; omitted from JSON when empty.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub identities: Vec<UserIdentityContract>,
}
/// Nested enums for [`UserEntityBaseParameters`].
pub mod user_entity_base_parameters {
    use super::*;
    /// Account state of a user.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum State {
        #[serde(rename = "active")]
        Active,
        #[serde(rename = "blocked")]
        Blocked,
        #[serde(rename = "pending")]
        Pending,
        #[serde(rename = "deleted")]
        Deleted,
    }
}
/// Page of [`UserIdentityContract`] values with an optional total count and
/// continuation link (`nextLink`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UserIdentityCollection {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<UserIdentityContract>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub count: Option<i64>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// A user identity at an external identity provider: provider name and the
/// user's id within that provider.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UserIdentityContract {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub provider: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
}
/// Request wrapper for obtaining a user access token.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UserTokenParameters {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<UserTokenParameterProperties>,
}
/// Parameters for a user token request: which key to sign with and the
/// token expiry (required; carried as a raw string).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UserTokenParameterProperties {
    #[serde(rename = "keyType")]
    pub key_type: user_token_parameter_properties::KeyType,
    pub expiry: String,
}
/// Nested enums for [`UserTokenParameterProperties`].
pub mod user_token_parameter_properties {
    use super::*;
    /// Selects the primary or secondary key.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum KeyType {
        #[serde(rename = "primary")]
        Primary,
        #[serde(rename = "secondary")]
        Secondary,
    }
}
/// Result of a user token request. The token is secret material — avoid
/// logging this value.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UserTokenResult {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub value: Option<String>,
}
/// Request wrapper for updating a user.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UserUpdateParameters {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<UserUpdateParametersProperties>,
}
/// Properties for updating a user; every field is optional so only the
/// supplied values are changed. The shared base fields are flattened in.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UserUpdateParametersProperties {
    #[serde(flatten)]
    pub user_entity_base_parameters: UserEntityBaseParameters,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub email: Option<String>,
    /// Secret material — avoid logging this field.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub password: Option<String>,
    #[serde(rename = "firstName", default, skip_serializing_if = "Option::is_none")]
    pub first_name: Option<String>,
    #[serde(rename = "lastName", default, skip_serializing_if = "Option::is_none")]
    pub last_name: Option<String>,
}
/// Reference to an X.509 certificate: its name plus the thumbprint of the
/// issuing certificate.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct X509CertificateName {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "issuerCertificateThumbprint", default, skip_serializing_if = "Option::is_none")]
    pub issuer_certificate_thumbprint: Option<String>,
}
/// Response carrying a client secret. Secret material — avoid logging.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ClientSecretContract {
    #[serde(rename = "clientSecret", default, skip_serializing_if = "Option::is_none")]
    pub client_secret: Option<String>,
}
/// Response carrying a named value's secret. Secret material — avoid logging.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NamedValueSecretContract {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub value: Option<String>,
}
/// Response carrying a portal-settings validation key. Secret material —
/// avoid logging.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PortalSettingValidationKeyContract {
    #[serde(rename = "validationKey", default, skip_serializing_if = "Option::is_none")]
    pub validation_key: Option<String>,
}
/// Response carrying a subscription's primary and secondary keys. Secret
/// material — avoid logging.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SubscriptionKeysContract {
    #[serde(rename = "primaryKey", default, skip_serializing_if = "Option::is_none")]
    pub primary_key: Option<String>,
    #[serde(rename = "secondaryKey", default, skip_serializing_if = "Option::is_none")]
    pub secondary_key: Option<String>,
}
/// Page of [`GatewayContract`] values with an optional total count and
/// continuation link (`nextLink`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GatewayCollection {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<GatewayContract>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub count: Option<i64>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Gateway resource: common [`Resource`] fields flattened into this object
/// plus optional gateway-specific properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GatewayContract {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<GatewayContractProperties>,
}
/// Gateway details: physical location and a free-text description.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GatewayContractProperties {
    #[serde(rename = "locationData", default, skip_serializing_if = "Option::is_none")]
    pub location_data: Option<ResourceLocationDataContract>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
}
/// Physical location of a resource: a required name plus optional city,
/// district and country/region.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceLocationDataContract {
    pub name: String,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub city: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub district: Option<String>,
    #[serde(rename = "countryOrRegion", default, skip_serializing_if = "Option::is_none")]
    pub country_or_region: Option<String>,
}
/// Response carrying a gateway's primary and secondary keys. Secret
/// material — avoid logging.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GatewayKeysContract {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub primary: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub secondary: Option<String>,
}
/// Parameters for a gateway token request: which key to sign with and the
/// token expiry (required; carried as a raw string).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GatewayTokenRequestContract {
    #[serde(rename = "keyType")]
    pub key_type: gateway_token_request_contract::KeyType,
    pub expiry: String,
}
/// Nested enums for [`GatewayTokenRequestContract`].
pub mod gateway_token_request_contract {
    use super::*;
    /// Selects the primary or secondary gateway key.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum KeyType {
        #[serde(rename = "primary")]
        Primary,
        #[serde(rename = "secondary")]
        Secondary,
    }
}
/// Result of a gateway token request. Secret material — avoid logging.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GatewayTokenContract {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub value: Option<String>,
}
/// Request to regenerate one of a gateway's keys; names which key.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GatewayKeyRegenerationRequestContract {
    #[serde(rename = "keyType")]
    pub key_type: gateway_key_regeneration_request_contract::KeyType,
}
/// Nested enums for [`GatewayKeyRegenerationRequestContract`].
pub mod gateway_key_regeneration_request_contract {
    use super::*;
    /// Selects the primary or secondary gateway key.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum KeyType {
        #[serde(rename = "primary")]
        Primary,
        #[serde(rename = "secondary")]
        Secondary,
    }
}
/// Page of [`GatewayHostnameConfigurationContract`] values with a
/// continuation link (`nextLink`); note: no `count` field, unlike most
/// collections in this module.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GatewayHostnameConfigurationCollection {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<GatewayHostnameConfigurationContract>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Gateway hostname configuration resource: common [`Resource`] fields
/// flattened into this object plus optional configuration properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GatewayHostnameConfigurationContract {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<GatewayHostnameConfigurationContractProperties>,
}
/// TLS/HTTP settings for a single gateway hostname: certificate binding,
/// client-certificate negotiation, and per-protocol enable flags.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GatewayHostnameConfigurationContractProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub hostname: Option<String>,
    #[serde(rename = "certificateId", default, skip_serializing_if = "Option::is_none")]
    pub certificate_id: Option<String>,
    #[serde(rename = "negotiateClientCertificate", default, skip_serializing_if = "Option::is_none")]
    pub negotiate_client_certificate: Option<bool>,
    /// Whether TLS 1.0 is allowed on this hostname.
    #[serde(rename = "tls10Enabled", default, skip_serializing_if = "Option::is_none")]
    pub tls10_enabled: Option<bool>,
    /// Whether TLS 1.1 is allowed on this hostname.
    #[serde(rename = "tls11Enabled", default, skip_serializing_if = "Option::is_none")]
    pub tls11_enabled: Option<bool>,
    /// Whether HTTP/2 is enabled on this hostname.
    #[serde(rename = "http2Enabled", default, skip_serializing_if = "Option::is_none")]
    pub http2_enabled: Option<bool>,
}
/// Page of [`GatewayCertificateAuthorityContract`] values with a
/// continuation link (`nextLink`); no `count` field.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GatewayCertificateAuthorityCollection {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<GatewayCertificateAuthorityContract>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Gateway certificate-authority resource: common [`Resource`] fields
/// flattened into this object plus optional CA properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GatewayCertificateAuthorityContract {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<GatewayCertificateAuthorityContractProperties>,
}
/// CA properties: whether the certificate authority is trusted.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct GatewayCertificateAuthorityContractProperties {
    #[serde(rename = "isTrusted", default, skip_serializing_if = "Option::is_none")]
    pub is_trusted: Option<bool>,
}
/// Association resource: common [`Resource`] fields flattened into this
/// object plus an optional provisioning-state property.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AssociationContract {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<association_contract::Properties>,
}
/// Nested types for [`AssociationContract`].
pub mod association_contract {
    use super::*;
    /// Association properties: only the provisioning state is modeled.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub struct Properties {
        #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
        pub provisioning_state: Option<properties::ProvisioningState>,
    }
    /// Nested enums for [`Properties`].
    pub mod properties {
        use super::*;
        /// Provisioning state; only `created` is modeled.
        #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
        pub enum ProvisioningState {
            #[serde(rename = "created")]
            Created,
        }
    }
}
/// Page of [`ContentTypeContract`] values with a continuation link
/// (`nextLink`); no `count` field.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ContentTypeCollection {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<ContentTypeContract>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Content-type resource: common [`Resource`] fields flattened into this
/// object plus optional content-type properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ContentTypeContract {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ContentTypeContractProperties>,
}
/// Content-type details: identifier, name, description, an untyped JSON
/// schema, and a version string.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ContentTypeContractProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    /// Arbitrary JSON schema; shape is not constrained by this type.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub schema: Option<serde_json::Value>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub version: Option<String>,
}
/// Page of [`ContentItemContract`] values with a continuation link
/// (`nextLink`); no `count` field.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ContentItemCollection {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<ContentItemContract>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Content-item resource: common [`Resource`] fields flattened into this
/// object plus (currently empty) content-item properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ContentItemContract {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<ContentItemContractProperties>,
}
/// Deliberately empty: the schema declares no typed properties for content
/// items.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ContentItemContractProperties {}
/// Page of [`DeletedServiceContract`] values with a continuation link
/// (`nextLink`); no `count` field.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DeletedServicesCollection {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<DeletedServiceContract>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Soft-deleted service resource: common [`Resource`] fields flattened in,
/// plus deletion details and — unusually for this module — a top-level
/// `location` field.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DeletedServiceContract {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<DeletedServiceContractProperties>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
}
/// Deletion details: the original service id, when it was deleted, and when
/// it will be purged. Dates are carried as raw strings.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct DeletedServiceContractProperties {
    #[serde(rename = "serviceId", default, skip_serializing_if = "Option::is_none")]
    pub service_id: Option<String>,
    #[serde(rename = "scheduledPurgeDate", default, skip_serializing_if = "Option::is_none")]
    pub scheduled_purge_date: Option<String>,
    #[serde(rename = "deletionDate", default, skip_serializing_if = "Option::is_none")]
    pub deletion_date: Option<String>,
}
/// Page of [`PortalRevisionContract`] values with a continuation link
/// (`nextLink`); no `count` field.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PortalRevisionCollection {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<PortalRevisionContract>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Portal revision resource: common [`Resource`] fields flattened into this
/// object plus optional revision properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PortalRevisionContract {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<PortalRevisionContractProperties>,
}
/// Portal revision details: description, publishing status (with detail
/// text), whether it is the current revision, and timestamps carried as raw
/// strings.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PortalRevisionContractProperties {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(rename = "statusDetails", default, skip_serializing_if = "Option::is_none")]
    pub status_details: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<portal_revision_contract_properties::Status>,
    #[serde(rename = "isCurrent", default, skip_serializing_if = "Option::is_none")]
    pub is_current: Option<bool>,
    #[serde(rename = "createdDateTime", default, skip_serializing_if = "Option::is_none")]
    pub created_date_time: Option<String>,
    #[serde(rename = "updatedDateTime", default, skip_serializing_if = "Option::is_none")]
    pub updated_date_time: Option<String>,
}
/// Nested enums for [`PortalRevisionContractProperties`].
pub mod portal_revision_contract_properties {
    use super::*;
    /// Publishing status of a portal revision.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Status {
        #[serde(rename = "pending")]
        Pending,
        #[serde(rename = "publishing")]
        Publishing,
        #[serde(rename = "completed")]
        Completed,
        #[serde(rename = "failed")]
        Failed,
    }
}
/// Request to approve or reject a private endpoint connection: the
/// connection's resource id plus the desired connection state.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateEndpointConnectionRequest {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<private_endpoint_connection_request::Properties>,
}
/// Nested types for [`PrivateEndpointConnectionRequest`].
pub mod private_endpoint_connection_request {
    use super::*;
    /// Carries the requested private-link service connection state.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub struct Properties {
        #[serde(rename = "privateLinkServiceConnectionState", default, skip_serializing_if = "Option::is_none")]
        pub private_link_service_connection_state: Option<PrivateLinkServiceConnectionState>,
    }
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct RemotePrivateEndpointConnectionWrapper {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<PrivateEndpointConnectionWrapperProperties>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateEndpointConnectionWrapperProperties {
#[serde(rename = "privateEndpoint", default, skip_serializing_if = "Option::is_none")]
pub private_endpoint: Option<ArmIdWrapper>,
#[serde(rename = "privateLinkServiceConnectionState")]
pub private_link_service_connection_state: PrivateLinkServiceConnectionState,
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<String>,
#[serde(rename = "groupIds", default, skip_serializing_if = "Vec::is_empty")]
pub group_ids: Vec<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ArmIdWrapper {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
}
/// Request for a connectivity check: required `source` and `destination`, with optional
/// IP-version preference, protocol, and per-protocol configuration.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ConnectivityCheckRequest {
pub source: connectivity_check_request::Source,
pub destination: connectivity_check_request::Destination,
#[serde(rename = "preferredIPVersion", default, skip_serializing_if = "Option::is_none")]
pub preferred_ip_version: Option<connectivity_check_request::PreferredIpVersion>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub protocol: Option<connectivity_check_request::Protocol>,
#[serde(rename = "protocolConfiguration", default, skip_serializing_if = "Option::is_none")]
pub protocol_configuration: Option<connectivity_check_request::ProtocolConfiguration>,
}
/// Nested types for `ConnectivityCheckRequest`.
pub mod connectivity_check_request {
use super::*;
/// Origin of the check: a region name plus an optional instance index.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Source {
pub region: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub instance: Option<i64>,
}
/// Target of the check: address and port are both required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Destination {
pub address: String,
pub port: i64,
}
/// Preferred IP version; only IPv4 is defined here.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum PreferredIpVersion {
IPv4,
}
/// Protocol used for the check; uppercase on the wire.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Protocol {
#[serde(rename = "TCP")]
Tcp,
#[serde(rename = "HTTP")]
Http,
#[serde(rename = "HTTPS")]
Https,
}
/// Protocol-specific settings; currently only an HTTP configuration.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ProtocolConfiguration {
#[serde(rename = "HTTPConfiguration", default, skip_serializing_if = "Option::is_none")]
pub http_configuration: Option<protocol_configuration::HttpConfiguration>,
}
/// Nested types for `ProtocolConfiguration`.
pub mod protocol_configuration {
use super::*;
/// HTTP-level check settings: method, acceptable status codes, and extra headers.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HttpConfiguration {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub method: Option<http_configuration::Method>,
#[serde(rename = "validStatusCodes", default, skip_serializing_if = "Vec::is_empty")]
pub valid_status_codes: Vec<i64>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub headers: Vec<HttpHeader>,
}
/// Nested types for `HttpConfiguration`.
pub mod http_configuration {
use super::*;
/// HTTP method for the check; uppercase on the wire.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Method {
#[serde(rename = "GET")]
Get,
#[serde(rename = "POST")]
Post,
}
}
}
}
/// A single HTTP header as a required name/value pair.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HttpHeader {
pub name: String,
pub value: String,
}
/// Result of a connectivity check: per-hop details plus aggregate status, latency, and probe counts.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ConnectivityCheckResponse {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub hops: Vec<ConnectivityHop>,
#[serde(rename = "connectionStatus", default, skip_serializing_if = "Option::is_none")]
pub connection_status: Option<connectivity_check_response::ConnectionStatus>,
// Latency figures are in milliseconds, per the field renames.
#[serde(rename = "avgLatencyInMs", default, skip_serializing_if = "Option::is_none")]
pub avg_latency_in_ms: Option<i64>,
#[serde(rename = "minLatencyInMs", default, skip_serializing_if = "Option::is_none")]
pub min_latency_in_ms: Option<i64>,
#[serde(rename = "maxLatencyInMs", default, skip_serializing_if = "Option::is_none")]
pub max_latency_in_ms: Option<i64>,
#[serde(rename = "probesSent", default, skip_serializing_if = "Option::is_none")]
pub probes_sent: Option<i64>,
#[serde(rename = "probesFailed", default, skip_serializing_if = "Option::is_none")]
pub probes_failed: Option<i64>,
}
/// Enum types scoped to `ConnectivityCheckResponse`.
pub mod connectivity_check_response {
use super::*;
/// Overall connection status of the check.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ConnectionStatus {
Unknown,
Connected,
Disconnected,
Degraded,
}
}
/// One hop on the connectivity path, with any issues found at that hop.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ConnectivityHop {
// `type` is a Rust keyword, hence the `type_` field name with a serde rename.
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub address: Option<String>,
#[serde(rename = "resourceId", default, skip_serializing_if = "Option::is_none")]
pub resource_id: Option<String>,
#[serde(rename = "nextHopIds", default, skip_serializing_if = "Vec::is_empty")]
pub next_hop_ids: Vec<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub issues: Vec<ConnectivityIssue>,
}
/// An issue detected at a hop: where it originated, how severe it is, and what kind it is.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ConnectivityIssue {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub origin: Option<connectivity_issue::Origin>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub severity: Option<connectivity_issue::Severity>,
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<connectivity_issue::Type>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub context: Vec<IssueContext>,
}
/// Enum types scoped to `ConnectivityIssue`.
pub mod connectivity_issue {
use super::*;
/// Where the issue originated relative to the checked resource.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Origin {
Local,
Inbound,
Outbound,
}
/// Severity of the issue.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Severity {
Error,
Warning,
}
/// Kind of connectivity issue.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Type {
Unknown,
AgentStopped,
GuestFirewall,
DnsResolution,
SocketBind,
NetworkSecurityRule,
UserDefinedRoute,
PortThrottled,
Platform,
}
}
/// Opaque key/value context for an issue; deliberately has no modeled fields.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct IssueContext {}
/// Paged list of SKU results; `value` is required here (no skip/default), unlike most collections in this file.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceSkuResults {
pub value: Vec<ResourceSkuResult>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
/// One available SKU for a resource type, with its capacity limits.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceSkuResult {
#[serde(rename = "resourceType", default, skip_serializing_if = "Option::is_none")]
pub resource_type: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub sku: Option<ResourceSku>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub capacity: Option<ResourceSkuCapacity>,
}
/// Capacity range (min/max/default) and scaling behavior for a SKU.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceSkuCapacity {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub minimum: Option<i32>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub maximum: Option<i32>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub default: Option<i32>,
#[serde(rename = "scaleType", default, skip_serializing_if = "Option::is_none")]
pub scale_type: Option<resource_sku_capacity::ScaleType>,
}
/// Enum types scoped to `ResourceSkuCapacity`.
pub mod resource_sku_capacity {
use super::*;
/// How capacity may be scaled; lowercase on the wire.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ScaleType {
#[serde(rename = "automatic")]
Automatic,
#[serde(rename = "manual")]
Manual,
#[serde(rename = "none")]
None,
}
}
/// A SKU identified only by its tier name.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ResourceSku {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<resource_sku::Name>,
}
/// Enum types scoped to `ResourceSku`.
pub mod resource_sku {
use super::*;
/// Service tier names; serialized verbatim (no renames).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Name {
Developer,
Standard,
Premium,
Basic,
Consumption,
Isolated,
}
}
/// Certificate metadata — expiry, thumbprint, and subject are all required.
/// Expiry is kept as a string; no datetime parsing at this layer.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CertificateInformation {
pub expiry: String,
pub thumbprint: String,
pub subject: String,
}
/// A certificate to install: the (optional) encoded payload and password,
/// a required target store, and the resolved certificate metadata.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct CertificateConfiguration {
// NOTE(review): presumably a base64-encoded certificate blob, as in similar Azure models — confirm.
#[serde(rename = "encodedCertificate", default, skip_serializing_if = "Option::is_none")]
pub encoded_certificate: Option<String>,
#[serde(rename = "certificatePassword", default, skip_serializing_if = "Option::is_none")]
pub certificate_password: Option<String>,
#[serde(rename = "storeName")]
pub store_name: certificate_configuration::StoreName,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub certificate: Option<CertificateInformation>,
}
/// Enum types scoped to `CertificateConfiguration`.
pub mod certificate_configuration {
use super::*;
/// Target certificate store.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum StoreName {
CertificateAuthority,
Root,
}
}
/// A custom hostname binding: endpoint kind + hostname are required; the certificate may be
/// supplied inline (`encoded_certificate`), referenced from a key vault (`key_vault_id`),
/// or described by `certificate_source`/`certificate_status`.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct HostnameConfiguration {
#[serde(rename = "type")]
pub type_: hostname_configuration::Type,
#[serde(rename = "hostName")]
pub host_name: String,
#[serde(rename = "keyVaultId", default, skip_serializing_if = "Option::is_none")]
pub key_vault_id: Option<String>,
#[serde(rename = "identityClientId", default, skip_serializing_if = "Option::is_none")]
pub identity_client_id: Option<String>,
#[serde(rename = "encodedCertificate", default, skip_serializing_if = "Option::is_none")]
pub encoded_certificate: Option<String>,
#[serde(rename = "certificatePassword", default, skip_serializing_if = "Option::is_none")]
pub certificate_password: Option<String>,
#[serde(rename = "defaultSslBinding", default, skip_serializing_if = "Option::is_none")]
pub default_ssl_binding: Option<bool>,
#[serde(rename = "negotiateClientCertificate", default, skip_serializing_if = "Option::is_none")]
pub negotiate_client_certificate: Option<bool>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub certificate: Option<CertificateInformation>,
#[serde(rename = "certificateSource", default, skip_serializing_if = "Option::is_none")]
pub certificate_source: Option<hostname_configuration::CertificateSource>,
#[serde(rename = "certificateStatus", default, skip_serializing_if = "Option::is_none")]
pub certificate_status: Option<hostname_configuration::CertificateStatus>,
}
/// Enum types scoped to `HostnameConfiguration`.
pub mod hostname_configuration {
use super::*;
/// Which service endpoint the hostname applies to.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Type {
Proxy,
Portal,
Management,
Scm,
DeveloperPortal,
}
/// Where the certificate comes from.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum CertificateSource {
Managed,
KeyVault,
Custom,
BuiltIn,
}
/// Provisioning status of the certificate.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum CertificateStatus {
Completed,
Failed,
InProgress,
}
}
/// Virtual network placement: either the legacy vnet id + subnet name pair, or a full subnet resource id.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VirtualNetworkConfiguration {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub vnetid: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub subnetname: Option<String>,
#[serde(rename = "subnetResourceId", default, skip_serializing_if = "Option::is_none")]
pub subnet_resource_id: Option<String>,
}
/// An additional deployment location for the service: region + SKU are required;
/// networking, gateway, and platform details are optional/read-back fields.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AdditionalLocation {
pub location: String,
pub sku: ApiManagementServiceSkuProperties,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub zones: Vec<String>,
#[serde(rename = "publicIPAddresses", default, skip_serializing_if = "Vec::is_empty")]
pub public_ip_addresses: Vec<String>,
#[serde(rename = "privateIPAddresses", default, skip_serializing_if = "Vec::is_empty")]
pub private_ip_addresses: Vec<String>,
#[serde(rename = "publicIpAddressId", default, skip_serializing_if = "Option::is_none")]
pub public_ip_address_id: Option<String>,
#[serde(rename = "virtualNetworkConfiguration", default, skip_serializing_if = "Option::is_none")]
pub virtual_network_configuration: Option<VirtualNetworkConfiguration>,
#[serde(rename = "gatewayRegionalUrl", default, skip_serializing_if = "Option::is_none")]
pub gateway_regional_url: Option<String>,
#[serde(rename = "disableGateway", default, skip_serializing_if = "Option::is_none")]
pub disable_gateway: Option<bool>,
#[serde(rename = "platformVersion", default, skip_serializing_if = "Option::is_none")]
pub platform_version: Option<additional_location::PlatformVersion>,
}
/// Enum types scoped to `AdditionalLocation`.
pub mod additional_location {
use super::*;
/// Compute platform version; lowercase on the wire.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum PlatformVersion {
#[serde(rename = "undetermined")]
Undetermined,
#[serde(rename = "stv1")]
Stv1,
#[serde(rename = "stv2")]
Stv2,
#[serde(rename = "mtv1")]
Mtv1,
}
}
/// Parameters for a backup or restore: target storage account/container/blob name are required;
/// authentication is either an access key or a managed identity (see `AccessType`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiManagementServiceBackupRestoreParameters {
#[serde(rename = "storageAccount")]
pub storage_account: String,
#[serde(rename = "containerName")]
pub container_name: String,
#[serde(rename = "backupName")]
pub backup_name: String,
#[serde(rename = "accessType", default, skip_serializing_if = "Option::is_none")]
pub access_type: Option<api_management_service_backup_restore_parameters::AccessType>,
// Only meaningful with the AccessKey access type.
#[serde(rename = "accessKey", default, skip_serializing_if = "Option::is_none")]
pub access_key: Option<String>,
// Only meaningful with the UserAssignedManagedIdentity access type.
#[serde(rename = "clientId", default, skip_serializing_if = "Option::is_none")]
pub client_id: Option<String>,
}
/// Enum types scoped to `ApiManagementServiceBackupRestoreParameters`.
pub mod api_management_service_backup_restore_parameters {
use super::*;
/// How the service authenticates to the storage account.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum AccessType {
AccessKey,
SystemAssignedManagedIdentity,
UserAssignedManagedIdentity,
}
}
/// Create-time service properties: the shared base set (flattened) plus publisher
/// email/name, which are REQUIRED here.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiManagementServiceProperties {
#[serde(flatten)]
pub api_management_service_base_properties: ApiManagementServiceBaseProperties,
#[serde(rename = "publisherEmail")]
pub publisher_email: String,
#[serde(rename = "publisherName")]
pub publisher_name: String,
}
/// Update-time service properties: identical to the create shape except the publisher
/// fields are OPTIONAL, so partial updates can omit them.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiManagementServiceUpdateProperties {
#[serde(flatten)]
pub api_management_service_base_properties: ApiManagementServiceBaseProperties,
#[serde(rename = "publisherEmail", default, skip_serializing_if = "Option::is_none")]
pub publisher_email: Option<String>,
#[serde(rename = "publisherName", default, skip_serializing_if = "Option::is_none")]
pub publisher_name: Option<String>,
}
/// Properties shared by the create and update payloads (flattened into both).
/// Mixes writable settings (hostnames, vnet, custom properties, certificates) with
/// read-back fields (provisioning state, URLs, IP addresses); every field is optional
/// or defaults to empty so partial round-trips serialize cleanly.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiManagementServiceBaseProperties {
#[serde(rename = "notificationSenderEmail", default, skip_serializing_if = "Option::is_none")]
pub notification_sender_email: Option<String>,
// Read-back: current and target provisioning state, creation time, and the various endpoint URLs.
#[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
pub provisioning_state: Option<String>,
#[serde(rename = "targetProvisioningState", default, skip_serializing_if = "Option::is_none")]
pub target_provisioning_state: Option<String>,
#[serde(rename = "createdAtUtc", default, skip_serializing_if = "Option::is_none")]
pub created_at_utc: Option<String>,
#[serde(rename = "gatewayUrl", default, skip_serializing_if = "Option::is_none")]
pub gateway_url: Option<String>,
#[serde(rename = "gatewayRegionalUrl", default, skip_serializing_if = "Option::is_none")]
pub gateway_regional_url: Option<String>,
#[serde(rename = "portalUrl", default, skip_serializing_if = "Option::is_none")]
pub portal_url: Option<String>,
#[serde(rename = "managementApiUrl", default, skip_serializing_if = "Option::is_none")]
pub management_api_url: Option<String>,
#[serde(rename = "scmUrl", default, skip_serializing_if = "Option::is_none")]
pub scm_url: Option<String>,
#[serde(rename = "developerPortalUrl", default, skip_serializing_if = "Option::is_none")]
pub developer_portal_url: Option<String>,
#[serde(rename = "hostnameConfigurations", default, skip_serializing_if = "Vec::is_empty")]
pub hostname_configurations: Vec<HostnameConfiguration>,
#[serde(rename = "publicIPAddresses", default, skip_serializing_if = "Vec::is_empty")]
pub public_ip_addresses: Vec<String>,
#[serde(rename = "privateIPAddresses", default, skip_serializing_if = "Vec::is_empty")]
pub private_ip_addresses: Vec<String>,
#[serde(rename = "publicIpAddressId", default, skip_serializing_if = "Option::is_none")]
pub public_ip_address_id: Option<String>,
#[serde(rename = "publicNetworkAccess", default, skip_serializing_if = "Option::is_none")]
pub public_network_access: Option<api_management_service_base_properties::PublicNetworkAccess>,
#[serde(rename = "virtualNetworkConfiguration", default, skip_serializing_if = "Option::is_none")]
pub virtual_network_configuration: Option<VirtualNetworkConfiguration>,
#[serde(rename = "additionalLocations", default, skip_serializing_if = "Vec::is_empty")]
pub additional_locations: Vec<AdditionalLocation>,
// Free-form JSON object — keys/values are not modeled here.
#[serde(rename = "customProperties", default, skip_serializing_if = "Option::is_none")]
pub custom_properties: Option<serde_json::Value>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub certificates: Vec<CertificateConfiguration>,
#[serde(rename = "enableClientCertificate", default, skip_serializing_if = "Option::is_none")]
pub enable_client_certificate: Option<bool>,
#[serde(rename = "disableGateway", default, skip_serializing_if = "Option::is_none")]
pub disable_gateway: Option<bool>,
#[serde(rename = "virtualNetworkType", default, skip_serializing_if = "Option::is_none")]
pub virtual_network_type: Option<api_management_service_base_properties::VirtualNetworkType>,
#[serde(rename = "apiVersionConstraint", default, skip_serializing_if = "Option::is_none")]
pub api_version_constraint: Option<ApiVersionConstraint>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub restore: Option<bool>,
#[serde(rename = "privateEndpointConnections", default, skip_serializing_if = "Vec::is_empty")]
pub private_endpoint_connections: Vec<RemotePrivateEndpointConnectionWrapper>,
#[serde(rename = "platformVersion", default, skip_serializing_if = "Option::is_none")]
pub platform_version: Option<api_management_service_base_properties::PlatformVersion>,
}
/// Enum types scoped to `ApiManagementServiceBaseProperties`.
pub mod api_management_service_base_properties {
use super::*;
/// Whether public network access is allowed.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum PublicNetworkAccess {
Enabled,
Disabled,
}
/// Kind of virtual network integration.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum VirtualNetworkType {
None,
External,
Internal,
}
/// Compute platform version; lowercase on the wire.
/// Duplicates `additional_location::PlatformVersion` — scoped per parent type by the generator.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum PlatformVersion {
#[serde(rename = "undetermined")]
Undetermined,
#[serde(rename = "stv1")]
Stv1,
#[serde(rename = "stv2")]
Stv2,
#[serde(rename = "mtv1")]
Mtv1,
}
}
/// Constraint limiting which control-plane API versions clients may use.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiVersionConstraint {
#[serde(rename = "minApiVersion", default, skip_serializing_if = "Option::is_none")]
pub min_api_version: Option<String>,
}
/// Service SKU: tier name and unit capacity are both required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiManagementServiceSkuProperties {
pub name: api_management_service_sku_properties::Name,
pub capacity: i32,
}
/// Enum types scoped to `ApiManagementServiceSkuProperties`.
pub mod api_management_service_sku_properties {
use super::*;
/// Service tier names; serialized verbatim (no renames).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Name {
Developer,
Standard,
Premium,
Basic,
Consumption,
Isolated,
}
}
/// The full service resource: common fields flattened from `ApimResource`, plus required
/// `properties`, `sku`, and `location`, and optional identity/system-data/etag/zones.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiManagementServiceResource {
#[serde(flatten)]
pub apim_resource: ApimResource,
pub properties: ApiManagementServiceProperties,
pub sku: ApiManagementServiceSkuProperties,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub identity: Option<ApiManagementServiceIdentity>,
#[serde(rename = "systemData", default, skip_serializing_if = "Option::is_none")]
pub system_data: Option<SystemData>,
pub location: String,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub etag: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub zones: Vec<String>,
}
/// Common resource envelope (id/name/type/tags) flattened into service resource types.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApimResource {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub id: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
// `type` is a Rust keyword, hence the `type_` field name with a serde rename.
#[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
pub type_: Option<String>,
// Free-form JSON tags object — not modeled as a map here.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub tags: Option<serde_json::Value>,
}
/// Patch payload for the service: every section is optional so callers can update selectively.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiManagementServiceUpdateParameters {
#[serde(flatten)]
pub apim_resource: ApimResource,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<ApiManagementServiceUpdateProperties>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub sku: Option<ApiManagementServiceSkuProperties>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub identity: Option<ApiManagementServiceIdentity>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub etag: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub zones: Vec<String>,
}
/// Paged list of service resources; `value` is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiManagementServiceListResult {
pub value: Vec<ApiManagementServiceResource>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
/// Response carrying the single-sign-on redirect URI.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiManagementServiceGetSsoTokenResult {
#[serde(rename = "redirectUri", default, skip_serializing_if = "Option::is_none")]
pub redirect_uri: Option<String>,
}
/// Request body for checking whether a service name is available.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiManagementServiceCheckNameAvailabilityParameters {
pub name: String,
}
/// Result of a name-availability check, with a reason when the name is unavailable.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiManagementServiceNameAvailabilityResult {
#[serde(rename = "nameAvailable", default, skip_serializing_if = "Option::is_none")]
pub name_available: Option<bool>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub message: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub reason: Option<api_management_service_name_availability_result::Reason>,
}
/// Enum types scoped to `ApiManagementServiceNameAvailabilityResult`.
pub mod api_management_service_name_availability_result {
use super::*;
/// Why a name is or is not available.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Reason {
Valid,
Invalid,
AlreadyExists,
}
}
/// Response carrying the custom-domain ownership identifier.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiManagementServiceGetDomainOwnershipIdentifierResult {
#[serde(rename = "domainOwnershipIdentifier", default, skip_serializing_if = "Option::is_none")]
pub domain_ownership_identifier: Option<String>,
}
/// Request body for re-applying network configuration, optionally scoped to one location.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiManagementServiceApplyNetworkConfigurationParameters {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub location: Option<String>,
}
/// Managed identity attached to the service; only `type` is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiManagementServiceIdentity {
#[serde(rename = "type")]
pub type_: api_management_service_identity::Type,
#[serde(rename = "principalId", default, skip_serializing_if = "Option::is_none")]
pub principal_id: Option<String>,
#[serde(rename = "tenantId", default, skip_serializing_if = "Option::is_none")]
pub tenant_id: Option<String>,
// Free-form JSON map of identity-resource-id -> properties; not modeled as a typed map here.
#[serde(rename = "userAssignedIdentities", default, skip_serializing_if = "Option::is_none")]
pub user_assigned_identities: Option<serde_json::Value>,
}
/// Enum types scoped to `ApiManagementServiceIdentity`.
pub mod api_management_service_identity {
use super::*;
/// Identity kind; the combined value serializes as the literal "SystemAssigned, UserAssigned".
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Type {
SystemAssigned,
UserAssigned,
#[serde(rename = "SystemAssigned, UserAssigned")]
SystemAssignedUserAssigned,
None,
}
}
/// Per-identity properties: principal and client ids.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct UserIdentityProperties {
#[serde(rename = "principalId", default, skip_serializing_if = "Option::is_none")]
pub principal_id: Option<String>,
#[serde(rename = "clientId", default, skip_serializing_if = "Option::is_none")]
pub client_id: Option<String>,
}
/// A REST operation descriptor with optional display metadata.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Operation {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub name: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub display: Option<operation::Display>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub origin: Option<String>,
// Free-form JSON; shape not modeled here.
#[serde(default, skip_serializing_if = "Option::is_none")]
pub properties: Option<serde_json::Value>,
}
/// Nested types for `Operation`.
pub mod operation {
use super::*;
/// Human-readable operation metadata (provider/operation/resource/description).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Display {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub provider: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub operation: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub resource: Option<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub description: Option<String>,
}
}
/// Paged list of operations; both fields are optional/defaulted.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OperationListResult {
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub value: Vec<Operation>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
/// Connectivity status to one dependency resource; most fields are required — only `error` is optional.
/// Timestamps are kept as strings; no datetime parsing at this layer.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ConnectivityStatusContract {
pub name: String,
pub status: connectivity_status_contract::Status,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub error: Option<String>,
#[serde(rename = "lastUpdated")]
pub last_updated: String,
#[serde(rename = "lastStatusChange")]
pub last_status_change: String,
#[serde(rename = "resourceType")]
pub resource_type: String,
#[serde(rename = "isOptional")]
pub is_optional: bool,
}
/// Enum types scoped to `ConnectivityStatusContract`.
pub mod connectivity_status_contract {
use super::*;
/// Connectivity state; lowercase on the wire.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum Status {
#[serde(rename = "initializing")]
Initializing,
#[serde(rename = "success")]
Success,
#[serde(rename = "failure")]
Failure,
}
}
/// Network status paired with the location it was measured from.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NetworkStatusContractByLocation {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub location: Option<String>,
#[serde(rename = "networkStatus", default, skip_serializing_if = "Option::is_none")]
pub network_status: Option<NetworkStatusContract>,
}
/// Network status snapshot: DNS servers in use plus per-dependency connectivity; both required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct NetworkStatusContract {
#[serde(rename = "dnsServers")]
pub dns_servers: Vec<String>,
#[serde(rename = "connectivityStatus")]
pub connectivity_status: Vec<ConnectivityStatusContract>,
}
/// Paged list of outbound endpoint groups; `value` is required.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OutboundEnvironmentEndpointList {
pub value: Vec<OutboundEnvironmentEndpoint>,
#[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
pub next_link: Option<String>,
}
/// A category of outbound dependencies and the endpoints in it.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct OutboundEnvironmentEndpoint {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub category: Option<String>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub endpoints: Vec<EndpointDependency>,
}
/// One outbound dependency: a domain name plus its concrete port/region endpoints.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EndpointDependency {
#[serde(rename = "domainName", default, skip_serializing_if = "Option::is_none")]
pub domain_name: Option<String>,
#[serde(rename = "endpointDetails", default, skip_serializing_if = "Vec::is_empty")]
pub endpoint_details: Vec<EndpointDetail>,
}
/// Port/region detail for one endpoint.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct EndpointDetail {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub port: Option<i32>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub region: Option<String>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiManagementSkuCapacity {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub minimum: Option<i32>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub maximum: Option<i32>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub default: Option<i32>,
#[serde(rename = "scaleType", default, skip_serializing_if = "Option::is_none")]
pub scale_type: Option<api_management_sku_capacity::ScaleType>,
}
pub mod api_management_sku_capacity {
use super::*;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum ScaleType {
Automatic,
Manual,
None,
}
}
/// One metered cost component contributing to a SKU's price.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiManagementSkuCosts {
    #[serde(rename = "meterID", default, skip_serializing_if = "Option::is_none")]
    pub meter_id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub quantity: Option<i64>,
    #[serde(rename = "extendedUnit", default, skip_serializing_if = "Option::is_none")]
    pub extended_unit: Option<String>,
}
/// A SKU capability expressed as a name/value string pair.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiManagementSkuCapabilities {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub value: Option<String>,
}
/// Per-availability-zone capability details for a SKU.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiManagementSkuZoneDetails {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub name: Vec<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub capabilities: Vec<ApiManagementSkuCapabilities>,
}
/// A restriction limiting where or whether a SKU may be used.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiManagementSkuRestrictions {
    // `type` is a Rust keyword, hence the `type_` field name plus a serde rename.
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<api_management_sku_restrictions::Type>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub values: Vec<String>,
    #[serde(rename = "restrictionInfo", default, skip_serializing_if = "Option::is_none")]
    pub restriction_info: Option<ApiManagementSkuRestrictionInfo>,
    #[serde(rename = "reasonCode", default, skip_serializing_if = "Option::is_none")]
    pub reason_code: Option<api_management_sku_restrictions::ReasonCode>,
}
/// Companion enums for `ApiManagementSkuRestrictions`.
pub mod api_management_sku_restrictions {
    use super::*;
    /// Dimension the restriction applies to.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum Type {
        Location,
        Zone,
    }
    /// Reason the restriction is in effect.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum ReasonCode {
        QuotaId,
        NotAvailableForSubscription,
    }
}
/// An available API Management SKU: naming, capacity, locations, costs,
/// capabilities and restrictions. All fields are optional/empty-skipped,
/// matching the generated REST model.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiManagementSku {
    #[serde(rename = "resourceType", default, skip_serializing_if = "Option::is_none")]
    pub resource_type: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tier: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub size: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub family: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub kind: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub capacity: Option<ApiManagementSkuCapacity>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub locations: Vec<String>,
    #[serde(rename = "locationInfo", default, skip_serializing_if = "Vec::is_empty")]
    pub location_info: Vec<ApiManagementSkuLocationInfo>,
    #[serde(rename = "apiVersions", default, skip_serializing_if = "Vec::is_empty")]
    pub api_versions: Vec<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub costs: Vec<ApiManagementSkuCosts>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub capabilities: Vec<ApiManagementSkuCapabilities>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub restrictions: Vec<ApiManagementSkuRestrictions>,
}
/// Availability information for a SKU within a single location.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiManagementSkuLocationInfo {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub location: Option<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub zones: Vec<String>,
    #[serde(rename = "zoneDetails", default, skip_serializing_if = "Vec::is_empty")]
    pub zone_details: Vec<ApiManagementSkuZoneDetails>,
}
/// Locations and zones that a `ApiManagementSkuRestrictions` entry applies to.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiManagementSkuRestrictionInfo {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub locations: Vec<String>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub zones: Vec<String>,
}
/// One page of SKU results; `next_link` points at the next page, if any.
/// `value` is mandatory (no default/skip), per the generated model.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct ApiManagementSkusResult {
    pub value: Vec<ApiManagementSku>,
    #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")]
    pub next_link: Option<String>,
}
/// Common ARM resource envelope: id, name and resource type.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct Resource {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    // `type` is a Rust keyword, hence `type_` with a serde rename.
    #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")]
    pub type_: Option<String>,
}
/// Approval state of a private-link service connection, with optional
/// human-readable detail and required actions.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkServiceConnectionState {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub status: Option<PrivateEndpointServiceConnectionStatus>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub description: Option<String>,
    #[serde(rename = "actionsRequired", default, skip_serializing_if = "Option::is_none")]
    pub actions_required: Option<String>,
}
/// Approval status of a private endpoint connection.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum PrivateEndpointServiceConnectionStatus {
    Pending,
    Approved,
    Rejected,
}
/// ARM system metadata: who created/last-modified the resource, and when.
/// Timestamps are carried as strings, as in the generated model.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SystemData {
    #[serde(rename = "createdBy", default, skip_serializing_if = "Option::is_none")]
    pub created_by: Option<String>,
    #[serde(rename = "createdByType", default, skip_serializing_if = "Option::is_none")]
    pub created_by_type: Option<system_data::CreatedByType>,
    #[serde(rename = "createdAt", default, skip_serializing_if = "Option::is_none")]
    pub created_at: Option<String>,
    #[serde(rename = "lastModifiedBy", default, skip_serializing_if = "Option::is_none")]
    pub last_modified_by: Option<String>,
    #[serde(rename = "lastModifiedByType", default, skip_serializing_if = "Option::is_none")]
    pub last_modified_by_type: Option<system_data::LastModifiedByType>,
    #[serde(rename = "lastModifiedAt", default, skip_serializing_if = "Option::is_none")]
    pub last_modified_at: Option<String>,
}
/// Companion enums for `SystemData`. The two enums share the same variants but
/// are kept distinct, mirroring the source API definition.
pub mod system_data {
    use super::*;
    /// Kind of identity that created the resource.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum CreatedByType {
        User,
        Application,
        ManagedIdentity,
        Key,
    }
    /// Kind of identity that last modified the resource.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    pub enum LastModifiedByType {
        User,
        Application,
        ManagedIdentity,
        Key,
    }
}
/// Result of listing private endpoint connections (no pagination link).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateEndpointConnectionListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<PrivateEndpointConnection>,
}
/// A private endpoint connection: ARM envelope plus connection properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateEndpointConnection {
    // `flatten` inlines the common id/name/type fields into this object.
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<PrivateEndpointConnectionProperties>,
}
/// Properties of a private endpoint connection. Note that the connection
/// state field is mandatory (no default/skip), unlike the other fields.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateEndpointConnectionProperties {
    #[serde(rename = "privateEndpoint", default, skip_serializing_if = "Option::is_none")]
    pub private_endpoint: Option<PrivateEndpoint>,
    #[serde(rename = "privateLinkServiceConnectionState")]
    pub private_link_service_connection_state: PrivateLinkServiceConnectionState,
    #[serde(rename = "provisioningState", default, skip_serializing_if = "Option::is_none")]
    pub provisioning_state: Option<PrivateEndpointConnectionProvisioningState>,
}
/// Reference to a private endpoint resource by ARM id.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateEndpoint {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub id: Option<String>,
}
/// Provisioning lifecycle state of a private endpoint connection.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum PrivateEndpointConnectionProvisioningState {
    Succeeded,
    Creating,
    Deleting,
    Failed,
}
/// Result of listing private link resources (no pagination link).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkResourceListResult {
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub value: Vec<PrivateLinkResource>,
}
/// A private link resource: ARM envelope plus link properties.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkResource {
    #[serde(flatten)]
    pub resource: Resource,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub properties: Option<PrivateLinkResourceProperties>,
}
/// Group id plus the members and DNS zone names required to set up the link.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct PrivateLinkResourceProperties {
    #[serde(rename = "groupId", default, skip_serializing_if = "Option::is_none")]
    pub group_id: Option<String>,
    #[serde(rename = "requiredMembers", default, skip_serializing_if = "Vec::is_empty")]
    pub required_members: Vec<String>,
    #[serde(rename = "requiredZoneNames", default, skip_serializing_if = "Vec::is_empty")]
    pub required_zone_names: Vec<String>,
}
| 43.904872 | 112 | 0.713635 |
d69cd5c2b9464066e8f3b66388d48d478cacd3df
| 1,677 |
pub mod cache;
pub mod drop_duplicates;
pub mod explode;
pub mod filter;
pub mod groupby;
pub mod join;
pub mod melt;
pub mod scan;
pub mod slice;
pub mod sort;
pub mod stack;
pub mod udf;
pub mod various;
use super::*;
use crate::logical_plan::FETCH_ROWS;
use itertools::Itertools;
use polars_core::POOL;
use rayon::prelude::*;
use std::path::PathBuf;
const POLARS_VERBOSE: &str = "POLARS_VERBOSE";
/// Determines the effective row limit for a scan: a thread-local `fetch`
/// limit (set via `FETCH_ROWS`) takes precedence over the scan-level
/// `stop_after_n_rows`, which is used only when no fetch is active.
fn set_n_rows(stop_after_n_rows: Option<usize>) -> Option<usize> {
    let fetch_rows = FETCH_ROWS.with(|fetch_rows| fetch_rows.get());
    // `match { None => other, Some(n) => Some(n) }` is exactly `Option::or`.
    fetch_rows.or(stop_after_n_rows)
}
/// Evaluates every physical expression against `df` (in parallel on the rayon
/// pool) and assembles the results into a new `DataFrame`. When the resulting
/// columns disagree in length, length-1 columns are broadcast to the frame
/// height; lengths are otherwise taken as-is (`new_no_checks` trusts them).
pub(crate) fn evaluate_physical_expressions(
    df: &DataFrame,
    exprs: &[Arc<dyn PhysicalExpr>],
    state: &ExecutionState,
) -> Result<DataFrame> {
    let height = df.height();
    // Run all expression evaluations concurrently; bail on the first error.
    let evaluated = POOL.install(|| {
        exprs
            .par_iter()
            .map(|expr| expr.evaluate(df, state))
            .collect::<Result<Vec<Series>>>()
    })?;
    // Broadcasting is only needed when there are multiple columns whose
    // lengths differ; a single column (or uniform lengths) passes through.
    let needs_broadcast =
        evaluated.len() > 1 && !evaluated.iter().map(|s| s.len()).all_equal();
    let columns = if needs_broadcast {
        evaluated
            .into_iter()
            .map(|series| {
                if series.len() == 1 && height > 1 {
                    series.expand_at_index(0, height)
                } else {
                    series
                }
            })
            .collect()
    } else {
        evaluated
    };
    Ok(DataFrame::new_no_checks(columns))
}
| 26.203125 | 96 | 0.582588 |
1ae03a4bdb4b1292648e76f32b29634a2bf8f1a0
| 1,031 |
//! @ The following code sets up the print routines so that they will gather
//! the desired information.
//
// @d begin_pseudoprint==
/// Begins a "pseudoprint": saves the current `tally` into `$l`, zeroes
/// `tally`, redirects output to the `pseudo` selector, and parks
/// `trick_count` at a large sentinel until `set_trick_count` computes the
/// real value. The interleaved `// ...` comments quote the original Pascal
/// WEB code this macro transliterates.
pub(crate) macro begin_pseudoprint($globals:expr, $l:expr) {{
    // begin l:=tally; tally:=0; selector:=pseudo;
    $l = ($globals.tally as u8).into();
    $globals.tally = 0;
    $globals.selector = pseudo.into();
    // trick_count:=1000000;
    $globals.trick_count = 1000000;
    // end
    use crate::section_0054::pseudo;
}}
// @d set_trick_count==
/// Completes a pseudoprint begun by `begin_pseudoprint`: records the tally at
/// this point in `first_count` and derives `trick_count` from the error-line
/// widths, clamped to at least `error_line`. Mirrors the quoted Pascal WEB
/// source line-for-line.
pub(crate) macro set_trick_count($globals:expr) {{
    // begin first_count:=tally;
    $globals.first_count = $globals.tally;
    // trick_count:=tally+1+error_line-half_error_line;
    $globals.trick_count =
        $globals.tally + 1 + $globals.error_line as integer - $globals.half_error_line as integer;
    // if trick_count<error_line then trick_count:=error_line;
    if $globals.trick_count < $globals.error_line as _ {
        $globals.trick_count = $globals.error_line as _;
    }
    // end
    use crate::pascal::integer;
}}
| 33.258065 | 98 | 0.670223 |
ed3b4162de9861f228917ac423e6cea064ab86ed
| 2,580 |
//! Asynchronous file and standard stream adaptation.
//!
//! This module contains utility methods and adapter types for input/output to
//! files or standard streams (`Stdin`, `Stdout`, `Stderr`), and
//! filesystem manipulation, for use within (and only within) a Tokio runtime.
//!
//! Tasks run by *worker* threads should not block, as this could delay
//! servicing reactor events. Portable filesystem operations are blocking,
//! however. This module offers adapters which use a [`blocking`] annotation
//! to inform the runtime that a blocking operation is required. When
//! necessary, this allows the runtime to convert the current thread from a
//! *worker* to a *backup* thread, where blocking is acceptable.
//!
//! ## Usage
//!
//! Where possible, users should prefer the provided asynchronous-specific
//! traits such as [`AsyncRead`], or methods returning a `Future` or `Poll`
//! type. Adaptions also extend to traits like `std::io::Read` where methods
//! return `std::io::Result`. Be warned that these adapted methods may return
//! `std::io::ErrorKind::WouldBlock` if a *worker* thread can not be converted
//! to a *backup* thread immediately.
//!
//! [`AsyncRead`]: https://docs.rs/tokio-io/0.1/tokio_io/trait.AsyncRead.html
pub(crate) mod blocking;
mod create_dir;
pub use self::create_dir::create_dir;
mod create_dir_all;
pub use self::create_dir_all::create_dir_all;
mod file;
pub use self::file::File;
mod hard_link;
pub use self::hard_link::hard_link;
mod metadata;
pub use self::metadata::metadata;
mod open_options;
pub use self::open_options::OpenOptions;
pub mod os;
mod read;
pub use self::read::read;
mod read_dir;
pub use self::read_dir::{read_dir, DirEntry, ReadDir};
mod read_link;
pub use self::read_link::read_link;
mod read_to_string;
pub use self::read_to_string::read_to_string;
mod remove_dir;
pub use self::remove_dir::remove_dir;
mod remove_dir_all;
pub use self::remove_dir_all::remove_dir_all;
mod remove_file;
pub use self::remove_file::remove_file;
mod rename;
pub use self::rename::rename;
mod set_permissions;
pub use self::set_permissions::set_permissions;
mod symlink_metadata;
pub use self::symlink_metadata::symlink_metadata;
mod write;
pub use self::write::write;
use std::io;
/// Runs the blocking closure `f` via the runtime's blocking facility
/// (`sys::run`, i.e. `crate::runtime::blocking::run`) and awaits its result.
/// All blocking filesystem calls in this module funnel through here.
pub(crate) async fn asyncify<F, T>(f: F) -> io::Result<T>
where
    F: FnOnce() -> io::Result<T> + Send + 'static,
    T: Send + 'static,
{
    sys::run(f).await
}
/// Types in this module can be mocked out in tests.
mod sys {
    pub(crate) use std::fs::File;
    pub(crate) use crate::runtime::blocking::{run, Blocking};
}
| 26.875 | 78 | 0.728682 |
2f6f1a1c9029ca4b512d6fa70482bdc63beed935
| 1,357 |
/*
* EVE Swagger Interface
*
* An OpenAPI for EVE Online
*
* OpenAPI spec version: 1.3.8
*
* Generated by: https://github.com/swagger-api/swagger-codegen.git
*/
/// GetCharactersCharacterIdPlanetsPlanetIdContent : content object
#[allow(unused_imports)]
use serde_json::Value;
#[derive(Debug, Serialize, Deserialize)]
pub struct GetCharactersCharacterIdPlanetsPlanetIdContent {
    /// Quantity held (serialized as "amount").
    #[serde(rename = "amount")]
    amount: i64,
    /// Item type identifier (serialized as "type_id").
    #[serde(rename = "type_id")]
    type_id: i32
}
impl GetCharactersCharacterIdPlanetsPlanetIdContent {
    /// Creates a content entry holding `amount` units of `type_id`.
    pub fn new(amount: i64, type_id: i32) -> GetCharactersCharacterIdPlanetsPlanetIdContent {
        GetCharactersCharacterIdPlanetsPlanetIdContent { amount, type_id }
    }
    /// Replaces the stored amount.
    pub fn set_amount(&mut self, amount: i64) {
        self.amount = amount;
    }
    /// Builder-style variant of [`set_amount`](Self::set_amount).
    pub fn with_amount(mut self, amount: i64) -> GetCharactersCharacterIdPlanetsPlanetIdContent {
        self.set_amount(amount);
        self
    }
    /// Borrows the stored amount.
    pub fn amount(&self) -> &i64 {
        &self.amount
    }
    /// Replaces the stored type id.
    pub fn set_type_id(&mut self, type_id: i32) {
        self.type_id = type_id;
    }
    /// Builder-style variant of [`set_type_id`](Self::set_type_id).
    pub fn with_type_id(mut self, type_id: i32) -> GetCharactersCharacterIdPlanetsPlanetIdContent {
        self.set_type_id(type_id);
        self
    }
    /// Borrows the stored type id.
    pub fn type_id(&self) -> &i32 {
        &self.type_id
    }
}
| 20.253731 | 97 | 0.692704 |
26f70a6290b18b728cad8688f96c1df1c781f2c3
| 1,556 |
//! Timer interrupt test. LED should flicker unevenly.
#![deny(unsafe_code)]
#![deny(warnings)]
#![feature(proc_macro)]
#![no_std]
extern crate stm32_f429_bgt6;
extern crate stm32f429x;
extern crate cortex_m_rtfm as rtfm;
extern crate nb;
use stm32_f429_bgt6::Timer;
use stm32_f429_bgt6::led::{self, LED};
use stm32_f429_bgt6::prelude::*;
use stm32_f429_bgt6::time::Hertz;
use stm32_f429_bgt6::clock;
use rtfm::{app, Threshold};
use stm32_f429_bgt6::stm32f40x as stm32f429x; //VERY IMPORTANT! Always do this to clarify what the base device crate really is!
// TIM2 and TIM3 run at different rates, so the LED's off (TIM2) and on (TIM3)
// transitions drift against each other — the "uneven flicker" from the
// file-top comment.
const FREQUENCY1: Hertz = Hertz(10);
const FREQUENCY2: Hertz = Hertz(6);
app! {
    device: stm32f429x,
    tasks: {
        TIM2: {
            path: tim2,
            resources: [TIM2],
        },
        TIM3: {
            path: tim3,
            resources: [TIM3],
        },
    },
}
// RTFM init: runs once with interrupts disabled before the tasks start.
fn init(p: init::Peripherals) {
    // Set system clock in order to test that it works
    clock::set_84_mhz(&p.RCC, &p.FLASH);
    led::init(p.GPIOA, p.RCC);
    // TIM2 at FREQUENCY1 (10 Hz) drives the LED-off task.
    let timer = Timer(&*p.TIM2);
    timer.init(FREQUENCY1.invert(), p.RCC);
    timer.resume();
    // TIM3 at FREQUENCY2 (6 Hz) drives the LED-on task.
    let timer = Timer(&*p.TIM3);
    timer.init(FREQUENCY2.invert(), p.RCC);
    timer.resume();
}
// TIM2 interrupt handler: acknowledge the update event, then switch the LED off.
fn tim2(_t: &mut Threshold, r: TIM2::Resources) {
    // Clear the interrupt flag (RM0368, 13.4.5)
    r.TIM2.sr.modify(|_, w| w.uif().clear_bit());
    LED.off();
}
// TIM3 interrupt handler: acknowledge the update event, then switch the LED on.
fn tim3(_t: &mut Threshold, r: TIM3::Resources) {
    // Clear the interrupt flag, same register/bit as in `tim2`.
    r.TIM3.sr.modify(|_, w| w.uif().clear_bit());
    LED.on();
}
// Idle task: sleep (wait-for-interrupt) until the next timer fires.
fn idle() -> ! {
    loop {
        rtfm::wfi();
    }
}
| 21.611111 | 127 | 0.615681 |
1cbdf798dc20e86ecb18fc303d8d793cee951523
| 27,624 |
// Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
failure::Error,
fidl::{self, encoding::OutOfLine, endpoints::ServerEnd},
fidl_fuchsia_bluetooth::{Error as FIDLBTError, ErrorCode as FIDLBTErrorCode},
fidl_fuchsia_bluetooth_bredr::ProfileMarker,
fidl_fuchsia_bluetooth_control::{
AdapterInfo, ControlControlHandle, DeviceClass, InputCapabilityType, OutputCapabilityType,
PairingDelegateProxy, RemoteDevice,
},
fidl_fuchsia_bluetooth_gatt::Server_Marker,
fidl_fuchsia_bluetooth_host::{HostData, HostProxy, LocalKey},
fidl_fuchsia_bluetooth_le::{CentralMarker, PeripheralMarker},
fuchsia_async::{self as fasync, TimeoutExt},
fuchsia_bluetooth::{
self as bt, bt_fidl_status,
error::Error as BTError,
util::{clone_bonding_data, clone_host_data, clone_host_info, clone_remote_device},
},
fuchsia_syslog::{fx_log_err, fx_log_info, fx_vlog},
fuchsia_zircon::{self as zx, Duration},
futures::{task::{Context, Waker}, Future, FutureExt, Poll, TryFutureExt},
parking_lot::RwLock,
slab::Slab,
std::collections::HashMap,
std::fs::File,
std::marker::Unpin,
std::path::Path,
std::sync::{Arc, Weak},
};
use crate::{
host_device::{self, HostDevice},
services,
store::stash::Stash,
};
// How long `OnAdaptersFound` waits for a first bt-host device before giving up.
pub static HOST_INIT_TIMEOUT: i64 = 5; // Seconds
// Local device name used until a client sets one via `set_name`.
static DEFAULT_NAME: &'static str = "fuchsia";
/// Available FIDL services that can be provided by a particular Host
pub enum HostService {
    LeCentral,
    LePeripheral,
    LeGatt,
    Profile,
}
// We use tokens to track the reference counting for discovery/discoverable states
// As long as at least one user maintains an Arc<> to the token, the state persists
// Once all references are dropped, the `Drop` trait on the token causes the state
// to be terminated.
/// Keeps discovery alive while at least one strong reference exists; the last
/// drop stops discovery on the (weakly held) host, if it is still around.
pub struct DiscoveryRequestToken {
    adap: Weak<RwLock<HostDevice>>,
}
impl Drop for DiscoveryRequestToken {
    #[allow(unused_must_use)] // FIXME(BT-643)
    fn drop(&mut self) {
        fx_vlog!(1, "DiscoveryRequestToken dropped");
        if let Some(host) = self.adap.upgrade() {
            // FIXME(nickpollard) this should be `await!`ed, but not while holding the lock
            host.write().stop_discovery();
        }
    }
}
/// Keeps the host discoverable while at least one strong reference exists;
/// the last drop turns discoverable mode off on the weakly held host.
pub struct DiscoverableRequestToken {
    adap: Weak<RwLock<HostDevice>>,
}
impl Drop for DiscoverableRequestToken {
    #[allow(unused_must_use)] // FIXME(nickpollard)
    fn drop(&mut self) {
        if let Some(host) = self.adap.upgrade() {
            // FIXME(BT-643) this should be `await!`ed, but not while holding the lock
            let host = host.write();
            host.set_discoverable(false);
        }
    }
}
type DeviceId = String;
/// The HostDispatcher acts as a proxy aggregating multiple HostAdapters
/// It appears as a Host to higher level systems, and is responsible for
/// routing commands to the appropriate HostAdapter
struct HostDispatcherState {
    host_devices: HashMap<String, Arc<RwLock<HostDevice>>>,
    active_id: Option<String>,
    // Component storage.
    pub stash: Stash,
    // GAP state
    name: String,
    // Held weakly: the state ends when the last strong token is dropped
    // (see the token types' `Drop` impls above).
    discovery: Option<Weak<DiscoveryRequestToken>>,
    discoverable: Option<Weak<DiscoverableRequestToken>>,
    pub input: InputCapabilityType,
    pub output: OutputCapabilityType,
    remote_devices: HashMap<DeviceId, RemoteDevice>,
    pub pairing_delegate: Option<PairingDelegateProxy>,
    pub event_listeners: Vec<Weak<ControlControlHandle>>,
    // Pending requests to obtain a Host.
    host_requests: Slab<Waker>,
}
impl HostDispatcherState {
    /// Set the active adapter for this HostDispatcher
    pub fn set_active_adapter(&mut self, adapter_id: String) -> fidl_fuchsia_bluetooth::Status {
        if let Some(ref id) = self.active_id {
            if *id == adapter_id {
                return bt_fidl_status!(Already, "Adapter already active");
            }
            // Shut down the previously active host.
            let _ = self.host_devices[id].write().close();
        }
        if self.host_devices.contains_key(&adapter_id) {
            self.set_active_id(Some(adapter_id));
            bt_fidl_status!()
        } else {
            bt_fidl_status!(NotFound, "Attempting to activate an unknown adapter")
        }
    }
    /// Used to set the pairing delegate. If there is a prior pairing delegate connected to the
    /// host it will fail. It checks if the existing stored connection is closed, and will
    /// overwrite it if so. Returns whether the new delegate was installed.
    pub fn set_pairing_delegate(&mut self, delegate: Option<PairingDelegateProxy>) -> bool {
        match delegate {
            Some(delegate) => {
                // Only replace an existing delegate whose channel has closed.
                let assign = match self.pairing_delegate {
                    None => true,
                    Some(ref pd) => pd.is_closed(),
                };
                if assign {
                    self.pairing_delegate = Some(delegate);
                }
                assign
            }
            None => {
                self.pairing_delegate = None;
                false
            }
        }
    }
    /// Returns the current pairing delegate proxy if it exists and has not been closed. Clears
    /// the stored delegate if its handle is closed.
    pub fn pairing_delegate(&mut self) -> Option<PairingDelegateProxy> {
        if let Some(delegate) = &self.pairing_delegate {
            if delegate.is_closed() {
                self.pairing_delegate = None;
            }
        }
        self.pairing_delegate.clone()
    }
    /// Return the active id. If the ID is currently not set,
    /// it will make the first ID in its host_devices active
    fn get_active_id(&mut self) -> Option<String> {
        match self.active_id {
            None => match self.host_devices.keys().next() {
                None => None,
                Some(id) => {
                    let id = Some(id.clone());
                    self.set_active_id(id);
                    self.active_id.clone()
                }
            },
            ref id => id.clone(),
        }
    }
    /// Return the active host. If the Host is currently not set,
    /// it will make the first ID in its host_devices active
    fn get_active_host(&mut self) -> Option<Arc<RwLock<HostDevice>>> {
        self.get_active_id()
            .as_ref()
            .and_then(|id| self.host_devices.get(id))
            .map(|host| host.clone())
    }
    /// Resolves all pending OnAdapterFuture's. Called when we leave the init period (by seeing the
    /// first host device or when the init timer expires).
    fn resolve_host_requests(&mut self) {
        for waker in &self.host_requests {
            waker.1.wake_by_ref();
        }
    }
    /// Registers a host device under `id`, notifies Control listeners, and
    /// wakes any futures waiting for an adapter to appear.
    fn add_host(&mut self, id: String, host: Arc<RwLock<HostDevice>>) {
        fx_log_info!("Host added: {:?}", host.read().get_info().identifier);
        let info = clone_host_info(host.read().get_info());
        self.host_devices.insert(id, host);
        // Notify Control interface clients about the new device.
        self.notify_event_listeners(|l| {
            let _res = l.send_on_adapter_updated(&mut clone_host_info(&info));
        });
        // Resolve pending adapter futures.
        self.resolve_host_requests();
    }
    /// Updates the active adapter and sends a FIDL event.
    fn set_active_id(&mut self, id: Option<String>) {
        fx_log_info!("New active adapter: {:?}", id);
        self.active_id = id;
        if let Some(ref mut adapter_info) = self.get_active_adapter_info() {
            self.notify_event_listeners(|listener| {
                let _res = listener.send_on_active_adapter_changed(Some(OutOfLine(adapter_info)));
            })
        }
    }
    /// Snapshot of the active adapter's info, if there is an active adapter.
    pub fn get_active_adapter_info(&mut self) -> Option<AdapterInfo> {
        self.get_active_host().map(|host| clone_host_info(host.read().get_info()))
    }
    /// Applies `f` to each live Control listener, pruning listeners whose
    /// handles have been dropped.
    pub fn notify_event_listeners<F>(&mut self, mut f: F)
    where
        F: FnMut(&ControlControlHandle) -> (),
    {
        self.event_listeners.retain(|listener| match listener.upgrade() {
            Some(listener_) => {
                f(&listener_);
                true
            }
            None => false,
        })
    }
}
/// Cheaply clonable handle to the shared dispatcher state; clones share one
/// `HostDispatcherState` behind an `Arc<RwLock<_>>`.
#[derive(Clone)]
pub struct HostDispatcher {
    state: Arc<RwLock<HostDispatcherState>>,
}
impl HostDispatcher {
pub fn new(stash: Stash) -> HostDispatcher {
let hd = HostDispatcherState {
active_id: None,
host_devices: HashMap::new(),
name: DEFAULT_NAME.to_string(),
input: InputCapabilityType::None,
output: OutputCapabilityType::None,
remote_devices: HashMap::new(),
stash: stash,
discovery: None,
discoverable: None,
pairing_delegate: None,
event_listeners: vec![],
host_requests: Slab::new(),
};
HostDispatcher { state: Arc::new(RwLock::new(hd)) }
}
pub fn get_active_adapter_info(&mut self) -> Option<AdapterInfo> {
self.state.write().get_active_adapter_info()
}
pub async fn on_adapters_found(&self) -> fidl::Result<HostDispatcher> {
await!(OnAdaptersFound::new(self.clone()))
}
pub async fn set_name(
&mut self,
name: Option<String>,
) -> fidl::Result<fidl_fuchsia_bluetooth::Status> {
self.state.write().name = name.unwrap_or(DEFAULT_NAME.to_string());
match await!(self.get_active_adapter())? {
Some(adapter) => await!(adapter.write().set_name(self.state.read().name.clone())),
None => Ok(bt_fidl_status!(BluetoothNotAvailable, "No Adapter found")),
}
}
pub async fn set_device_class(
&mut self,
class: DeviceClass,
) -> fidl::Result<fidl_fuchsia_bluetooth::Status> {
match await!(self.get_active_adapter())? {
Some(adapter) => await!(adapter.write().set_device_class(class)),
None => Ok(bt_fidl_status!(BluetoothNotAvailable, "No Adapter found")),
}
}
/// Set the active adapter for this HostDispatcher
pub fn set_active_adapter(&mut self, adapter_id: String) -> fidl_fuchsia_bluetooth::Status {
self.state.write().set_active_adapter(adapter_id)
}
pub fn set_pairing_delegate(&mut self, delegate: Option<PairingDelegateProxy>) -> bool {
self.state.write().set_pairing_delegate(delegate)
}
pub async fn start_discovery(
&mut self,
) -> fidl::Result<(fidl_fuchsia_bluetooth::Status, Option<Arc<DiscoveryRequestToken>>)> {
let strong_current_token =
self.state.read().discovery.as_ref().and_then(|token| token.upgrade());
if let Some(token) = strong_current_token {
return Ok((bt_fidl_status!(), Some(Arc::clone(&token))));
}
match await!(self.get_active_adapter())? {
Some(adapter) => {
let weak_adapter = Arc::downgrade(&adapter);
let resp = await!(adapter.write().start_discovery())?;
match resp.error {
Some(_) => Ok((resp, None)),
None => {
let token = Arc::new(DiscoveryRequestToken { adap: weak_adapter });
self.state.write().discovery = Some(Arc::downgrade(&token));
Ok((resp, Some(token)))
}
}
}
None => Ok((bt_fidl_status!(BluetoothNotAvailable, "No Adapter found"), None)),
}
}
pub async fn set_discoverable(
&mut self,
) -> fidl::Result<(fidl_fuchsia_bluetooth::Status, Option<Arc<DiscoverableRequestToken>>)> {
let strong_current_token =
self.state.read().discoverable.as_ref().and_then(|token| token.upgrade());
if let Some(token) = strong_current_token {
return Ok((bt_fidl_status!(), Some(Arc::clone(&token))));
}
match await!(self.get_active_adapter())? {
Some(adapter) => {
let weak_adapter = Arc::downgrade(&adapter);
let resp = await!(adapter.write().set_discoverable(true))?;
match resp.error {
Some(_) => Ok((resp, None)),
None => {
let token = Arc::new(DiscoverableRequestToken { adap: weak_adapter });
self.state.write().discoverable = Some(Arc::downgrade(&token));
Ok((resp, Some(token)))
}
}
}
None => Ok((bt_fidl_status!(BluetoothNotAvailable, "No Adapter found"), None)),
}
}
pub async fn forget(
&mut self,
peer_id: String,
) -> fidl::Result<fidl_fuchsia_bluetooth::Status> {
// Try to delete from each adapter, even if it might not have the peer.
// remote_devices will be updated by the disconnection(s).
let adapters = await!(self.get_all_adapters());
if adapters.is_empty() {
return Ok(bt_fidl_status!(BluetoothNotAvailable, "No adapter found"));
}
let mut adapters_removed: u32 = 0;
for adapter in adapters {
let adapter_path = adapter.read().path.clone();
if let Some(e) = await!(adapter.write().forget(peer_id.clone()))?.error {
match *e {
FIDLBTError { error_code: FIDLBTErrorCode::NotFound, .. } => {
fx_vlog!(1, "No peer {} on adapter {:?}; ignoring", peer_id, adapter_path)
}
_ => {
fx_log_err!(
"Could not forget peer {} on adapter {:?}",
peer_id,
adapter_path
);
return Ok(fidl_fuchsia_bluetooth::Status { error: Some(e) });
}
}
} else {
adapters_removed += 1;
}
}
match self.state.write().stash.rm_peer(&peer_id) {
Err(_) => return Ok(bt_fidl_status!(Failed, "Couldn't remove peer")),
Ok(_) => (),
}
if adapters_removed == 0 {
return Ok(bt_fidl_status!(Failed, "No adapters had peer"));
}
Ok(bt_fidl_status!())
}
pub async fn disconnect(
&mut self,
device_id: String,
) -> fidl::Result<fidl_fuchsia_bluetooth::Status> {
let adapter = await!(self.get_active_adapter())?;
match adapter {
Some(adapter) => await!(adapter.write().rm_gatt(device_id)),
None => Ok(bt_fidl_status!(BluetoothNotAvailable, "Adapter went away")),
}
}
pub async fn get_active_adapter(&mut self) -> fidl::Result<Option<Arc<RwLock<HostDevice>>>> {
let adapter = await!(self.on_adapters_found())?;
let mut wstate = adapter.state.write();
Ok(wstate.get_active_host())
}
pub async fn get_all_adapters(&self) -> Vec<Arc<RwLock<HostDevice>>> {
let _ = await!(self.on_adapters_found());
self.state.read().host_devices.values().cloned().collect()
}
pub async fn get_adapters(&self) -> fidl::Result<Vec<AdapterInfo>> {
let hosts = self.state.read();
Ok(hosts
.host_devices
.values()
.map(|host| clone_host_info(host.read().get_info()))
.collect())
}
pub async fn request_host_service(mut self, chan: fasync::Channel, service: HostService) {
let adapter = await!(self.get_active_adapter());
match adapter {
Ok(Some(adapter)) => {
let adapter = adapter.read();
let host = adapter.get_host();
match service {
HostService::LeCentral => {
let remote = ServerEnd::<CentralMarker>::new(chan.into());
let _ = host.request_low_energy_central(remote);
}
HostService::LePeripheral => {
let remote = ServerEnd::<PeripheralMarker>::new(chan.into());
let _ = host.request_low_energy_peripheral(remote);
}
HostService::LeGatt => {
let remote = ServerEnd::<Server_Marker>::new(chan.into());
let _ = host.request_gatt_server_(remote);
}
HostService::Profile => {
let remote = ServerEnd::<ProfileMarker>::new(chan.into());
let _ = host.request_profile(remote);
}
}
}
Ok(None) => eprintln!("Failed to spawn, no active adapter"),
Err(e) => eprintln!("Failed to spawn, error resolving adapter {:?}", e),
}
}
pub fn set_io_capability(&self, input: InputCapabilityType, output: OutputCapabilityType) {
let mut state = self.state.write();
state.input = input;
state.output = output;
}
pub fn add_event_listener(&self, handle: Weak<ControlControlHandle>) {
self.state.write().event_listeners.push(handle);
}
pub fn notify_event_listeners<F>(&self, f: F)
where
F: FnMut(&ControlControlHandle) -> (),
{
self.state.write().notify_event_listeners(f);
}
/// Returns the current pairing delegate proxy if it exists and has not been closed. Clears the
/// if the handle is closed.
pub fn pairing_delegate(&self) -> Option<PairingDelegateProxy> {
self.state.write().pairing_delegate()
}
pub fn store_bond(
&self,
bond_data: fidl_fuchsia_bluetooth_host::BondingData,
) -> Result<(), Error> {
self.state.write().stash.store_bond(bond_data)
}
pub fn on_device_updated(&self, mut device: RemoteDevice) {
// TODO(NET-1297): generic method for this pattern
self.notify_event_listeners(|listener| {
let _res = listener
.send_on_device_updated(&mut device)
.map_err(|e| fx_log_err!("Failed to send device updated event: {:?}", e));
});
let _drop_old_value =
self.state.write().remote_devices.insert(device.identifier.clone(), device);
}
pub fn on_device_removed(&self, identifier: String) {
self.state.write().remote_devices.remove(&identifier);
self.notify_event_listeners(|listener| {
let _res = listener
.send_on_device_removed(&identifier)
.map_err(|e| fx_log_err!("Failed to send device removed event: {:?}", e));
})
}
pub fn get_remote_devices(&self) -> Vec<RemoteDevice> {
self.state.read().remote_devices.values().map(clone_remote_device).collect()
}
/// Adds an adapter to the host dispatcher. Called by the watch_hosts device
/// watcher
pub async fn add_adapter(self, host_path: &Path) -> Result<(), Error> {
let host_dev = bt::hci::open_rdwr(host_path)?;
let device_topo = fdio::device_get_topo_path(&host_dev)?;
fx_log_info!("Adding Adapter: {:?} (topology: {:?})", host_path, device_topo);
let host_device = await!(init_host(host_path))?;
// TODO(armansito): Make sure that the bt-host device is left in a well-known state if any
// of these operations fails.
// TODO(PKG-47): The following code applies a number of configurations to the bt-host by
// default. We should tie these to a package configuration (once it is possible), as some of these
// are undesirable in certain situations, e.g when running PTS tests.
//
// Currently applied settings:
// - LE Privacy with IRK
// - LE background scan for auto-connection
// - BR/EDR connectable mode
let address = host_device.read().get_info().address.clone();
assign_host_data(host_device.clone(), self.clone(), &address)?;
await!(try_restore_bonds(host_device.clone(), self.clone(), &address))?;
// Enable privacy by default.
host_device.read().enable_privacy(true)?;
// TODO(NET-1445): Only the active host should be made connectable and scanning in the background.
await!(host_device.read().set_connectable(true))
.map_err(|_| BTError::new("failed to set connectable"))?;
host_device
.read()
.enable_background_scan(true)
.map_err(|_| BTError::new("failed to enable background scan"))?;
// Initialize bt-gap as this host's pairing delegate.
start_pairing_delegate(self.clone(), host_device.clone())?;
let id = host_device.read().get_info().identifier.clone();
self.state.write().add_host(id, host_device.clone());
// Start listening to Host interface events.
fasync::spawn(host_device::run(self.clone(), host_device.clone()).map(|_| ()));
Ok(())
}
pub fn rm_adapter(self, host_path: &Path) {
fx_log_info!("Host removed: {:?}", host_path);
let mut hd = self.state.write();
let active_id = hd.active_id.clone();
// Get the host IDs that match `host_path`.
let ids: Vec<String> = hd
.host_devices
.iter()
.filter(|(_, ref host)| host.read().path == host_path)
.map(|(k, _)| k.clone())
.collect();
for id in &ids {
hd.host_devices.remove(id);
hd.notify_event_listeners(|listener| {
let _ = listener.send_on_adapter_removed(id);
})
}
// Reset the active ID if it got removed.
if let Some(active_id) = active_id {
if ids.contains(&active_id) {
hd.active_id = None;
}
}
// Try to assign a new active adapter. This may send an "OnActiveAdapterChanged" event.
if hd.active_id.is_none() {
let _ = hd.get_active_id();
}
}
    /// Initiates a connection to the peer identified by `device_id` using the
    /// currently active adapter.
    ///
    /// Resolves to a `BluetoothNotAvailable` status when no active adapter
    /// exists (e.g. all bt-host devices disappeared before the request ran).
    pub async fn connect(
        &mut self,
        device_id: String,
    ) -> fidl::Result<fidl_fuchsia_bluetooth::Status> {
        let adapter = await!(self.get_active_adapter())?;
        match adapter {
            Some(adapter) => await!(adapter.write().connect(device_id)),
            None => Ok(bt_fidl_status!(BluetoothNotAvailable, "Adapter went away")),
        }
    }
}
/// A future that completes when at least one adapter is available.
#[must_use = "futures do nothing unless polled"]
struct OnAdaptersFound {
    // Dispatcher whose host map is watched for the first available adapter.
    hd: HostDispatcher,
    // Slot key of the waker registered with the dispatcher's `host_requests`;
    // `None` while no waker is registered.
    waker_key: Option<usize>,
}
impl OnAdaptersFound {
    // Constructs an OnAdaptersFound that completes at the latest after HOST_INIT_TIMEOUT seconds.
    fn new(hd: HostDispatcher) -> impl Future<Output = fidl::Result<HostDispatcher>> {
        OnAdaptersFound { hd: hd.clone(), waker_key: None }.on_timeout(
            Duration::from_seconds(HOST_INIT_TIMEOUT).after_now(),
            move || {
                {
                    // Timed out: if there are still no adapters, wake all
                    // pending host requests so they observe the empty map.
                    let mut inner = hd.state.write();
                    if inner.host_devices.len() == 0 {
                        fx_log_info!("No bt-host devices found");
                        inner.resolve_host_requests();
                    }
                }
                // The timeout path still resolves successfully; callers get a
                // dispatcher with no hosts rather than an error.
                Ok(hd)
            },
        )
    }
    // Deregisters this future's waker from the dispatcher, if one was
    // registered by a previous poll. Safe to call repeatedly.
    fn remove_waker(&mut self) {
        if let Some(key) = self.waker_key {
            self.hd.state.write().host_requests.remove(key);
        }
        self.waker_key = None;
    }
}
impl Drop for OnAdaptersFound {
    fn drop(&mut self) {
        // Ensure the dispatcher never holds a waker for a dropped future.
        self.remove_waker()
    }
}
// The future holds no self-referential data, so it is trivially Unpin.
impl Unpin for OnAdaptersFound {}
impl Future for OnAdaptersFound {
    type Output = fidl::Result<HostDispatcher>;
    fn poll(mut self: ::std::pin::Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        if self.hd.state.read().host_devices.len() == 0 {
            // No adapters yet: register our waker (at most once) so the
            // dispatcher can wake us when a host is added.
            let hd = self.hd.clone();
            if self.waker_key.is_none() {
                self.waker_key = Some(hd.state.write().host_requests.insert(cx.waker().clone()));
            }
            Poll::Pending
        } else {
            // At least one adapter exists: drop the waker registration and finish.
            self.remove_waker();
            Poll::Ready(Ok(self.hd.clone()))
        }
    }
}
/// Initialize a HostDevice.
///
/// Opens the bt-host device node at `path`, establishes a Host FIDL channel
/// to it, and queries it for its basic adapter information.
async fn init_host(path: &Path) -> Result<Arc<RwLock<HostDevice>>, Error> {
    // Connect to the host device.
    let host = File::open(path).map_err(|_| BTError::new("failed to open bt-host device"))?;
    let handle = bt::host::open_host_channel(&host)?;
    let handle = fasync::Channel::from_channel(handle.into())?;
    let host = HostProxy::new(handle);
    // Obtain basic information and create an entry in the dispatcher's map.
    let adapter_info = await!(host.get_info())
        .map_err(|_| BTError::new("failed to obtain bt-host information"))?;
    Ok(Arc::new(RwLock::new(HostDevice::new(path.to_path_buf(), host, adapter_info))))
}
/// Loads the bonding data stored for the host identified by `address` and
/// pushes it down to the bt-host so previously bonded peers keep working.
///
/// A host that has no stored bonds is a no-op; a failure reported by the host
/// is logged and propagated.
async fn try_restore_bonds(
    host_device: Arc<RwLock<HostDevice>>,
    hd: HostDispatcher,
    address: &str,
) -> Result<(), Error> {
    // Load bonding data that use this host's `address` as their "local identity address".
    let opt_data: Option<Vec<_>> = {
        // Clone the bonds out inside this scope so the dispatcher read lock is
        // not held across the await below.
        let lock = hd.state.read();
        lock.stash
            .list_bonds(address)
            .map(|iter| iter.map(clone_bonding_data).collect())
    };
    let data = match opt_data {
        Some(data) => data,
        // No bonds stored for this address; nothing to restore.
        None => return Ok(())
    };
    let res = await!(host_device.read().restore_bonds(data));
    match res {
        Ok(_) => Ok(()),
        Err(e) => {
            fx_log_err!("failed to restore bonding data for host: {}", e);
            Err(e.into())
        }
    }
}
/// Generates a fresh 128-bit Identity Resolving Key from the kernel CPRNG.
fn generate_irk() -> Result<LocalKey, zx::Status> {
    let mut value = [0u8; 16];
    zx::cprng_draw(&mut value)?;
    Ok(LocalKey { value })
}
/// Assigns "local data" (currently just the IRK) to the given bt-host.
///
/// An existing IRK for `address` is restored from the stash when present;
/// otherwise a new one is generated, persisted, and handed to the host.
fn assign_host_data(
    host_device: Arc<RwLock<HostDevice>>,
    hd: HostDispatcher,
    address: &str,
) -> Result<(), Error> {
    // Obtain an existing IRK or generate a new one if one doesn't already exist for |address|.
    let stash = &mut hd.state.write().stash;
    let data = match stash.get_host_data(address) {
        Some(host_data) => {
            fx_vlog!(1, "restored IRK");
            clone_host_data(host_data)
        }
        None => {
            // Generate a new IRK.
            fx_vlog!(1, "generating new IRK");
            let new_data = HostData { irk: Some(Box::new(generate_irk()?)) };
            // Persist before applying it, so a later restart restores the same IRK.
            if let Err(e) = stash.store_host_data(address, clone_host_data(&new_data)) {
                fx_log_err!("failed to persist local IRK");
                return Err(e);
            }
            new_data
        }
    };
    host_device.read().set_local_data(data).map_err(|e| BTError::from(e).into())
}
/// Registers bt-gap as the pairing delegate for `host_device` and spawns a
/// task that services pairing requests arriving on the delegate stream.
fn start_pairing_delegate(
    hd: HostDispatcher,
    host_device: Arc<RwLock<HostDevice>>,
) -> Result<(), Error> {
    // Initialize bt-gap as this host's pairing delegate.
    // TODO(NET-1445): Do this only for the active host. This will make sure that non-active hosts
    // always reject pairing.
    let (delegate_client_end, delegate_stream) = fidl::endpoints::create_request_stream()?;
    host_device.read().set_host_pairing_delegate(
        hd.state.read().input,
        hd.state.read().output,
        delegate_client_end,
    );
    // Service the delegate stream on the executor; a failure is only logged
    // because pairing delegation is best-effort here.
    fasync::spawn(
        services::start_pairing_delegate(hd.clone(), delegate_stream)
            .unwrap_or_else(|e| eprintln!("Failed to spawn {:?}", e)),
    );
    Ok(())
}
| 36.443272 | 106 | 0.585143 |
9c4a4b3d3cd8c556bfd257b4c432c31015d25168
| 13,703 |
// Copyright 2018 Brian Smith.
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
// SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
// OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
// CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
use super::{counter, iv::Iv, quic::Sample, Block, Direction, BLOCK_LEN};
use crate::{bits::BitLength, c, cpu, endian::*, error, polyfill};
/// An expanded AES key schedule plus the CPU feature set captured at
/// construction time (used to pick an implementation on every call).
pub(crate) struct Key {
    inner: AES_KEY,
    cpu_features: cpu::Features,
}
/// Declares the named C key-expansion routine and forwards to the
/// `set_encrypt_key` helper with it.
macro_rules! set_encrypt_key {
    ( $name:ident, $bytes:expr, $key_bits:expr, $key:expr ) => {{
        extern "C" {
            fn $name(user_key: *const u8, bits: c::uint, key: &mut AES_KEY) -> c::int;
        }
        set_encrypt_key($name, $bytes, $key_bits, $key)
    }};
}
#[inline]
fn set_encrypt_key(
f: unsafe extern "C" fn(*const u8, c::uint, &mut AES_KEY) -> c::int,
bytes: &[u8],
key_bits: BitLength,
key: &mut AES_KEY,
) -> Result<(), error::Unspecified> {
// Unusually, in this case zero means success and non-zero means failure.
if 0 == unsafe { f(bytes.as_ptr(), key_bits.as_usize_bits() as c::uint, key) } {
Ok(())
} else {
Err(error::Unspecified)
}
}
/// Declares the named C single-block encryption routine and forwards to the
/// `encrypt_block_` helper with it.
macro_rules! encrypt_block {
    ($name:ident, $block:expr, $key:expr) => {{
        extern "C" {
            fn $name(a: &Block, r: *mut Block, key: &AES_KEY);
        }
        encrypt_block_($name, $block, $key)
    }};
}
/// Invokes the C single-block routine `f` on `a`, returning the encrypted block.
#[inline]
fn encrypt_block_(
    f: unsafe extern "C" fn(&Block, *mut Block, &AES_KEY),
    a: Block,
    key: &Key,
) -> Block {
    let mut result = core::mem::MaybeUninit::uninit();
    unsafe {
        // SAFETY: the routine fully initializes the output block before
        // returning, making `assume_init` sound.
        f(&a, result.as_mut_ptr(), &key.inner);
        result.assume_init()
    }
}
/// Declares the named C CTR-mode bulk-encryption routine and forwards to the
/// `ctr32_encrypt_blocks_` helper with it.
macro_rules! ctr32_encrypt_blocks {
    ($name:ident, $in_out:expr, $in_prefix_len:expr, $key:expr, $ivec:expr ) => {{
        extern "C" {
            fn $name(
                input: *const u8,
                output: *mut u8,
                blocks: c::size_t,
                key: &AES_KEY,
                ivec: &Counter,
            );
        }
        ctr32_encrypt_blocks_($name, $in_out, $in_prefix_len, $key, $ivec)
    }};
}
/// Runs the C CTR-mode routine `f` over `in_out`.
///
/// The input starts at offset `in_prefix_len` and the output is written to the
/// front of `in_out` (the two regions may overlap); the data length must be a
/// whole number of blocks. `ctr` is advanced past the blocks consumed.
#[inline]
fn ctr32_encrypt_blocks_(
    f: unsafe extern "C" fn(
        input: *const u8,
        output: *mut u8,
        blocks: c::size_t,
        key: &AES_KEY,
        ivec: &Counter,
    ),
    in_out: &mut [u8],
    in_prefix_len: usize,
    key: &AES_KEY,
    ctr: &mut Counter,
) {
    let in_out_len = in_out.len().checked_sub(in_prefix_len).unwrap();
    assert_eq!(in_out_len % BLOCK_LEN, 0);
    let blocks = in_out_len / BLOCK_LEN;
    // Verify the cast can't truncate, so the counter increment below is exact.
    let blocks_u32 = blocks as u32;
    assert_eq!(blocks, polyfill::usize_from_u32(blocks_u32));
    let input = in_out[in_prefix_len..].as_ptr();
    let output = in_out.as_mut_ptr();
    unsafe {
        // SAFETY: `input` and `output` each cover `blocks * BLOCK_LEN` bytes
        // of `in_out`; the callees are written to tolerate this forward
        // (output-before-input) overlap.
        f(input, output, blocks, &key, ctr);
    }
    ctr.increment_by_less_safe(blocks_u32);
}
impl Key {
    /// Expands `bytes` into a key schedule for `variant`, using the best
    /// key-setup routine available on this CPU.
    ///
    /// Fails with `error::Unspecified` when `bytes` has the wrong length.
    #[inline]
    pub fn new(
        bytes: &[u8],
        variant: Variant,
        cpu_features: cpu::Features,
    ) -> Result<Self, error::Unspecified> {
        let key_bits = match variant {
            Variant::AES_128 => BitLength::from_usize_bits(128),
            Variant::AES_256 => BitLength::from_usize_bits(256),
        };
        if BitLength::from_usize_bytes(bytes.len())? != key_bits {
            return Err(error::Unspecified);
        }
        let mut key = AES_KEY {
            rd_key: [0u32; 4 * (MAX_ROUNDS + 1)],
            rounds: 0,
        };
        // Dispatch to the matching assembly/C key-expansion routine.
        match detect_implementation(cpu_features) {
            #[cfg(any(
                target_arch = "aarch64",
                target_arch = "arm",
                target_arch = "x86_64",
                target_arch = "x86",
                target_arch = "s390x"
            ))]
            Implementation::HWAES => {
                set_encrypt_key!(GFp_aes_hw_set_encrypt_key, bytes, key_bits, &mut key)?
            }
            #[cfg(any(
                target_arch = "aarch64",
                target_arch = "arm",
                target_arch = "x86_64",
                target_arch = "x86"
            ))]
            Implementation::VPAES_BSAES => {
                set_encrypt_key!(GFp_vpaes_set_encrypt_key, bytes, key_bits, &mut key)?
            }
            #[cfg(not(any(target_arch = "aarch64", target_arch = "s390x")))]
            Implementation::NOHW => {
                set_encrypt_key!(GFp_aes_nohw_set_encrypt_key, bytes, key_bits, &mut key)?
            }
        };
        Ok(Self {
            inner: key,
            cpu_features,
        })
    }
    /// Encrypts a single block (raw ECB-style block operation).
    #[inline]
    pub fn encrypt_block(&self, a: Block) -> Block {
        match detect_implementation(self.cpu_features) {
            #[cfg(any(
                target_arch = "aarch64",
                target_arch = "arm",
                target_arch = "x86_64",
                target_arch = "x86",
                target_arch = "s390x"
            ))]
            Implementation::HWAES => encrypt_block!(GFp_aes_hw_encrypt, a, self),
            #[cfg(any(
                target_arch = "aarch64",
                target_arch = "arm",
                target_arch = "x86_64",
                target_arch = "x86"
            ))]
            Implementation::VPAES_BSAES => encrypt_block!(GFp_vpaes_encrypt, a, self),
            #[cfg(not(any(target_arch = "aarch64", target_arch = "s390x")))]
            Implementation::NOHW => encrypt_block!(GFp_aes_nohw_encrypt, a, self),
        }
    }
    /// Encrypts `iv` and XORs the result with `input` — a single-block CTR step.
    #[inline]
    pub fn encrypt_iv_xor_block(&self, iv: Iv, input: Block) -> Block {
        let mut output = self.encrypt_block(Block::from(&iv.into_bytes_less_safe()));
        output.bitxor_assign(input);
        output
    }
    /// Bulk CTR-mode encryption over `in_out`.
    ///
    /// For `Direction::Opening` the ciphertext begins `in_prefix_len` bytes in
    /// and the plaintext is written to the front of the slice; for `Sealing`
    /// the data is processed in place. `ctr` is advanced past the blocks used.
    #[inline]
    pub(super) fn ctr32_encrypt_blocks(
        &self,
        in_out: &mut [u8],
        direction: Direction,
        ctr: &mut Counter,
    ) {
        let in_prefix_len = match direction {
            Direction::Opening { in_prefix_len } => in_prefix_len,
            Direction::Sealing => 0,
        };
        let in_out_len = in_out.len().checked_sub(in_prefix_len).unwrap();
        assert_eq!(in_out_len % BLOCK_LEN, 0);
        match detect_implementation(self.cpu_features) {
            #[cfg(any(
                target_arch = "aarch64",
                target_arch = "arm",
                target_arch = "x86_64",
                target_arch = "x86",
                target_arch = "s390x"
            ))]
            Implementation::HWAES => ctr32_encrypt_blocks!(
                GFp_aes_hw_ctr32_encrypt_blocks,
                in_out,
                in_prefix_len,
                &self.inner,
                ctr
            ),
            #[cfg(any(target_arch = "aarch64", target_arch = "arm", target_arch = "x86_64"))]
            Implementation::VPAES_BSAES => {
                // 8 blocks is the cut-off point where it's faster to use BSAES.
                // On ARM, process the bulk of a large input with BSAES (after
                // converting the VPAES key schedule) and the tail with VPAES.
                #[cfg(target_arch = "arm")]
                let in_out = if in_out_len >= 8 * BLOCK_LEN {
                    let remainder = in_out_len % (8 * BLOCK_LEN);
                    let bsaes_in_out_len = if remainder < (4 * BLOCK_LEN) {
                        in_out_len - remainder
                    } else {
                        in_out_len
                    };
                    let mut bsaes_key = AES_KEY {
                        rd_key: [0u32; 4 * (MAX_ROUNDS + 1)],
                        rounds: 0,
                    };
                    extern "C" {
                        fn GFp_vpaes_encrypt_key_to_bsaes(
                            bsaes_key: &mut AES_KEY,
                            vpaes_key: &AES_KEY,
                        );
                    }
                    unsafe {
                        GFp_vpaes_encrypt_key_to_bsaes(&mut bsaes_key, &self.inner);
                    }
                    ctr32_encrypt_blocks!(
                        GFp_bsaes_ctr32_encrypt_blocks,
                        &mut in_out[..(bsaes_in_out_len + in_prefix_len)],
                        in_prefix_len,
                        &bsaes_key,
                        ctr
                    );
                    &mut in_out[bsaes_in_out_len..]
                } else {
                    in_out
                };
                ctr32_encrypt_blocks!(
                    GFp_vpaes_ctr32_encrypt_blocks,
                    in_out,
                    in_prefix_len,
                    &self.inner,
                    ctr
                )
            }
            #[cfg(any(target_arch = "x86"))]
            Implementation::VPAES_BSAES => {
                // No vpaes-CTR routine on x86: do one block at a time,
                // shifting each decrypted block to the front of the slice.
                super::shift::shift_full_blocks(in_out, in_prefix_len, |input| {
                    self.encrypt_iv_xor_block(ctr.increment(), Block::from(input))
                });
            }
            #[cfg(not(any(target_arch = "aarch64", target_arch = "s390x")))]
            Implementation::NOHW => ctr32_encrypt_blocks!(
                GFp_aes_nohw_ctr32_encrypt_blocks,
                in_out,
                in_prefix_len,
                &self.inner,
                ctr
            ),
        }
    }
    /// Returns the first 5 bytes of the encrypted `sample` — the mask used
    /// for QUIC header protection.
    pub fn new_mask(&self, sample: Sample) -> [u8; 5] {
        let block = self.encrypt_block(Block::from(&sample));
        let mut out: [u8; 5] = [0; 5];
        out.copy_from_slice(&block.as_ref()[..5]);
        out
    }
    // TODO: use `matches!` when MSRV increases to 1.42.0 and remove this
    // `#[allow(...)]`
    /// Whether the hardware (AES-NI) implementation would be used by this key.
    #[allow(clippy::unknown_clippy_lints)]
    #[allow(clippy::match_like_matches_macro)]
    #[cfg(target_arch = "x86_64")]
    #[must_use]
    pub fn is_aes_hw(&self) -> bool {
        match detect_implementation(self.cpu_features) {
            Implementation::HWAES => true,
            _ => false,
        }
    }
    /// Exposes the raw expanded key schedule; callers must not assume
    /// anything beyond the C-layout contract of `AES_KEY`.
    #[cfg(target_arch = "x86_64")]
    #[must_use]
    pub(super) fn inner_less_safe(&self) -> &AES_KEY {
        &self.inner
    }
}
// Keep this in sync with AES_KEY in aes.h.
/// Expanded AES round-key schedule, laid out identically to the C `AES_KEY`.
#[repr(C)]
pub(super) struct AES_KEY {
    pub rd_key: [u32; 4 * (MAX_ROUNDS + 1)],
    pub rounds: c::uint,
}
// Keep this in sync with `AES_MAXNR` in aes.h.
const MAX_ROUNDS: usize = 14;
/// Supported AES key sizes. (AES-192 is not offered.)
pub enum Variant {
    AES_128,
    AES_256,
}
/// A 32-bit big-endian CTR-mode counter.
pub type Counter = counter::Counter<BigEndian<u32>>;
/// The concrete AES implementation selected at runtime.
#[repr(C)] // Only so `Key` can be `#[repr(C)]`
#[derive(Clone, Copy)]
pub enum Implementation {
    // Hardware AES instructions (AES-NI, ARMv8 AES, s390x CPACF).
    #[cfg(any(
        target_arch = "aarch64",
        target_arch = "arm",
        target_arch = "x86_64",
        target_arch = "x86",
        target_arch = "s390x"
    ))]
    HWAES = 1,
    // On "arm" only, this indicates that the bsaes implementation may be used.
    #[cfg(any(
        target_arch = "aarch64",
        target_arch = "arm",
        target_arch = "x86_64",
        target_arch = "x86"
    ))]
    VPAES_BSAES = 2,
    // Portable fallback with no special CPU-feature requirements.
    #[cfg(not(any(target_arch = "aarch64", target_arch = "s390x")))]
    NOHW = 3,
}
/// Selects the fastest AES implementation available for this CPU.
///
/// Preference order: hardware AES, then vector-permute (VPAES/BSAES), then
/// the portable fallback. On aarch64 and s390x the choice is fixed at
/// compile time, so the trailing cfg blocks act as expression returns.
fn detect_implementation(cpu_features: cpu::Features) -> Implementation {
    // `cpu_features` is only used for specific platforms.
    #[cfg(not(any(
        target_arch = "aarch64",
        target_arch = "arm",
        target_arch = "x86_64",
        target_arch = "x86",
        target_arch = "s390x"
    )))]
    let _cpu_features = cpu_features;
    #[cfg(any(
        target_arch = "aarch64",
        target_arch = "arm",
        target_arch = "x86_64",
        target_arch = "x86",
        target_arch = "s390x"
    ))]
    {
        if cpu::intel::AES.available(cpu_features) || cpu::arm::AES.available(cpu_features) {
            return Implementation::HWAES;
        }
    }
    #[cfg(any(target_arch = "x86_64", target_arch = "x86"))]
    {
        if cpu::intel::SSSE3.available(cpu_features) {
            return Implementation::VPAES_BSAES;
        }
    }
    #[cfg(target_arch = "arm")]
    {
        if cpu::arm::NEON.available(cpu_features) {
            return Implementation::VPAES_BSAES;
        }
    }
    #[cfg(target_arch = "aarch64")]
    {
        Implementation::VPAES_BSAES
    }
    #[cfg(target_arch = "s390x")]
    {
        Implementation::HWAES
    }
    #[cfg(not(any(target_arch = "aarch64", target_arch = "s390x")))]
    {
        Implementation::NOHW
    }
}
#[cfg(test)]
mod tests {
    use super::{super::BLOCK_LEN, *};
    use crate::test;
    use core::convert::TryInto;
    // Known-answer test: encrypts each "Input" block from the test-vector
    // file with the given "Key" and compares against "Output".
    #[test]
    pub fn test_aes() {
        test::run(test_file!("aes_tests.txt"), |section, test_case| {
            assert_eq!(section, "");
            let key = consume_key(test_case, "Key");
            let input = test_case.consume_bytes("Input");
            let input: &[u8; BLOCK_LEN] = input.as_slice().try_into()?;
            let expected_output = test_case.consume_bytes("Output");
            let block = Block::from(input);
            let output = key.encrypt_block(block);
            assert_eq!(output.as_ref(), &expected_output[..]);
            Ok(())
        })
    }
    // Reads the named key from the test case; its byte length selects
    // AES-128 vs AES-256.
    fn consume_key(test_case: &mut test::TestCase, name: &str) -> Key {
        let key = test_case.consume_bytes(name);
        let variant = match key.len() {
            16 => Variant::AES_128,
            32 => Variant::AES_256,
            _ => unreachable!(),
        };
        Key::new(&key[..], variant, cpu::features()).unwrap()
    }
}
| 30.116484 | 93 | 0.52733 |
2faaa8ff071f0103a5d1612d6a78b8619ee9349f
| 4,410 |
//! Handles the `Enter` key press. At the momently, this only continues
//! comments, but should handle indent some time in the future as well.
use ra_db::{FilePosition, SourceDatabase};
use ra_ide_db::RootDatabase;
use ra_syntax::{
ast::{self, AstToken},
AstNode, SmolStr, SourceFile,
SyntaxKind::*,
SyntaxToken, TextSize, TokenAtOffset,
};
use ra_text_edit::TextEdit;
/// Computes the edit to apply when `Enter` is pressed inside a line comment:
/// inserts a newline, the current indentation, the comment prefix, and a
/// `$0` cursor marker. Returns `None` when no continuation should happen.
pub(crate) fn on_enter(db: &RootDatabase, position: FilePosition) -> Option<TextEdit> {
    let parse = db.parse(position.file_id);
    let file = parse.tree();
    let token = file.syntax().token_at_offset(position.offset).left_biased()?;
    let comment = ast::Comment::cast(token)?;
    // Block comments don't need a prefix on the following line.
    if comment.kind().shape.is_block() {
        return None;
    }
    let prefix = comment.prefix();
    let range = comment.syntax().text_range();
    // Bail if the cursor sits inside the prefix itself.
    if position.offset < range.start() + TextSize::of(prefix) {
        return None;
    }
    // Continuing single-line non-doc comments (like this one :) ) is annoying
    let at_comment_end = range.end() == position.offset;
    if prefix == "//" && at_comment_end && !followed_by_comment(&comment) {
        return None;
    }
    let indent = node_indent(&file, comment.syntax())?;
    let inserted = format!("\n{}{} $0", indent, prefix);
    Some(TextEdit::insert(position.offset, inserted))
}
/// Returns true when `comment` is followed by whitespace that stays on the
/// same/next line (does not span multiple lines) and then another comment.
fn followed_by_comment(comment: &ast::Comment) -> bool {
    let next = comment.syntax().next_token();
    match next.and_then(ast::Whitespace::cast) {
        Some(ws) if !ws.spans_multiple_lines() => {
            ws.syntax().next_token().and_then(ast::Comment::cast).is_some()
        }
        _ => false,
    }
}
/// Returns the indentation (whitespace after the last newline) preceding
/// `token`, an empty string when `token` starts the file, or `None` when the
/// token directly follows a non-whitespace token.
fn node_indent(file: &SourceFile, token: &SyntaxToken) -> Option<SmolStr> {
    let ws = match file.syntax().token_at_offset(token.text_range().start()) {
        TokenAtOffset::Between(l, r) => {
            // The offset sits between two tokens; `token` must be the right
            // one and the candidate indentation is the token on its left.
            assert!(r == *token);
            l
        }
        TokenAtOffset::Single(n) => {
            // Only one token touches the offset, i.e. `token` is at the very
            // start of the file — there is no indentation before it.
            assert!(n == *token);
            return Some("".into());
        }
        TokenAtOffset::None => unreachable!(),
    };
    if ws.kind() != WHITESPACE {
        return None;
    }
    let text = ws.text();
    // Keep only the portion of the whitespace after the last newline.
    let pos = text.rfind('\n').map(|it| it + 1).unwrap_or(0);
    Some(text[pos..].into())
}
#[cfg(test)]
mod tests {
    use test_utils::assert_eq_text;
    use crate::mock_analysis::analysis_and_position;
    use stdx::trim_indent;
    // Runs `on_enter` at the `<|>` cursor marker in `before`; returns the
    // edited text, or `None` when the handler produced no edit.
    fn apply_on_enter(before: &str) -> Option<String> {
        let (analysis, position) = analysis_and_position(&before);
        let result = analysis.on_enter(position).unwrap()?;
        let mut actual = analysis.file_text(position.file_id).unwrap().to_string();
        result.apply(&mut actual);
        Some(actual)
    }
    // Asserts that pressing Enter transforms `ra_fixture_before` into
    // `ra_fixture_after` (the expected text is de-indented first).
    fn do_check(ra_fixture_before: &str, ra_fixture_after: &str) {
        let ra_fixture_after = &trim_indent(ra_fixture_after);
        let actual = apply_on_enter(ra_fixture_before).unwrap();
        assert_eq_text!(ra_fixture_after, &actual);
    }
    // Asserts that pressing Enter produces no edit at all.
    fn do_check_noop(ra_fixture_text: &str) {
        assert!(apply_on_enter(ra_fixture_text).is_none())
    }
    #[test]
    fn continues_doc_comment() {
        do_check(
            r"
/// Some docs<|>
fn foo() {
}
",
            r"
/// Some docs
/// $0
fn foo() {
}
",
        );
        do_check(
            r"
impl S {
    /// Some<|> docs.
    fn foo() {}
}
",
            r"
impl S {
    /// Some
    /// $0 docs.
    fn foo() {}
}
",
        );
        do_check(
            r"
///<|> Some docs
fn foo() {
}
",
            r"
///
/// $0 Some docs
fn foo() {
}
",
        );
    }
    #[test]
    fn does_not_continue_before_doc_comment() {
        do_check_noop(r"<|>//! docz");
    }
    #[test]
    fn continues_code_comment_in_the_middle_of_line() {
        do_check(
            r"
fn main() {
    // Fix<|> me
    let x = 1 + 1;
}
",
            r"
fn main() {
    // Fix
    // $0 me
    let x = 1 + 1;
}
",
        );
    }
    #[test]
    fn continues_code_comment_in_the_middle_several_lines() {
        do_check(
            r"
fn main() {
    // Fix<|>
    // me
    let x = 1 + 1;
}
",
            r"
fn main() {
    // Fix
    // $0
    // me
    let x = 1 + 1;
}
",
        );
    }
    #[test]
    fn does_not_continue_end_of_code_comment() {
        do_check_noop(
            r"
fn main() {
    // Fix me<|>
    let x = 1 + 1;
}
",
        );
    }
}
| 21.407767 | 99 | 0.552834 |
ef52b9dd5464d323fd953ea8c2d5faa3478146be
| 3,424 |
use std::time::Duration;
use nu_protocol::{
ast::Call,
engine::{Command, EngineState, Stack},
Category, Example, IntoInterruptiblePipelineData, PipelineData, ShellError, Signature, Value,
};
/// The `ps` command: lists system processes as a table.
#[derive(Clone)]
pub struct Ps;
impl Command for Ps {
    fn name(&self) -> &str {
        "ps"
    }
    fn signature(&self) -> Signature {
        Signature::build("ps")
            .desc("View information about system processes.")
            // `-l`/`--long` adds the extra columns emitted by `run_ps`
            // (command; plus cwd/environment on Windows).
            .switch(
                "long",
                "list all available columns for each entry",
                Some('l'),
            )
            .filter()
            .category(Category::System)
    }
    fn usage(&self) -> &str {
        "View information about system processes."
    }
    fn run(
        &self,
        engine_state: &EngineState,
        _stack: &mut Stack,
        call: &Call,
        _input: PipelineData,
    ) -> Result<nu_protocol::PipelineData, nu_protocol::ShellError> {
        // Input is ignored; `ps` always produces a fresh process listing.
        run_ps(engine_state, call)
    }
    fn examples(&self) -> Vec<Example> {
        vec![Example {
            description: "List the system processes",
            example: "ps",
            result: None,
        }]
    }
}
/// Collects the current process list and converts each process into a record
/// value; interruptible via the engine's ctrl-c flag.
fn run_ps(engine_state: &EngineState, call: &Call) -> Result<PipelineData, ShellError> {
    let mut output = vec![];
    let span = call.head;
    let long = call.has_flag("long");
    // The 100ms interval is used by the collector to sample CPU usage.
    for proc in nu_system::collect_proc(Duration::from_millis(100), false) {
        let mut cols = vec![];
        let mut vals = vec![];
        cols.push("pid".to_string());
        vals.push(Value::Int {
            val: proc.pid() as i64,
            span,
        });
        cols.push("name".to_string());
        vals.push(Value::String {
            val: proc.name(),
            span,
        });
        #[cfg(not(windows))]
        {
            // Hide status on Windows until we can find a good way to support it
            cols.push("status".to_string());
            vals.push(Value::String {
                val: proc.status(),
                span,
            });
        }
        cols.push("cpu".to_string());
        vals.push(Value::Float {
            val: proc.cpu_usage(),
            span,
        });
        cols.push("mem".to_string());
        vals.push(Value::Filesize {
            val: proc.mem_size() as i64,
            span,
        });
        cols.push("virtual".to_string());
        vals.push(Value::Filesize {
            val: proc.virtual_size() as i64,
            span,
        });
        // `--long` adds the full command line, and on Windows also the
        // working directory and environment.
        if long {
            cols.push("command".to_string());
            vals.push(Value::String {
                val: proc.command(),
                span,
            });
            #[cfg(windows)]
            {
                cols.push("cwd".to_string());
                vals.push(Value::String {
                    val: proc.cwd(),
                    span,
                });
                cols.push("environment".to_string());
                vals.push(Value::List {
                    vals: proc
                        .environ()
                        .iter()
                        .map(|x| Value::string(x.to_string(), span))
                        .collect(),
                    span,
                });
            }
        }
        output.push(Value::Record { cols, vals, span });
    }
    Ok(output
        .into_iter()
        .into_pipeline_data(engine_state.ctrlc.clone()))
}
| 25.744361 | 97 | 0.465537 |
4bc95203e32573cea6e605ab7ebd618f4a2ba2b7
| 19,531 |
// NOTE(review): these types look prost-generated from Tendermint protobuf
// definitions — comments added here will be lost on regeneration; TODO
// confirm the codegen source. Field tags must stay in sync with the .proto.
/// A set of validators with an optional designated proposer.
#[derive(::serde::Deserialize, ::serde::Serialize)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ValidatorSet {
    #[prost(message, repeated, tag="1")]
    pub validators: ::prost::alloc::vec::Vec<Validator>,
    #[prost(message, optional, tag="2")]
    pub proposer: ::core::option::Option<Validator>,
    #[prost(int64, tag="3")]
    pub total_voting_power: i64,
}
/// A single validator: address (hex-serialized), public key, and powers.
#[derive(::serde::Deserialize, ::serde::Serialize)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Validator {
    #[prost(bytes="vec", tag="1")]
    #[serde(with = "crate::serializers::bytes::hexstring")]
    pub address: ::prost::alloc::vec::Vec<u8>,
    #[prost(message, optional, tag="2")]
    pub pub_key: ::core::option::Option<super::crypto::PublicKey>,
    #[prost(int64, tag="3")]
    #[serde(alias = "power", with = "crate::serializers::from_str")]
    pub voting_power: i64,
    #[prost(int64, tag="4")]
    #[serde(with = "crate::serializers::from_str", default)]
    pub proposer_priority: i64,
}
/// A validator reduced to its public key and voting power.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SimpleValidator {
    #[prost(message, optional, tag="1")]
    pub pub_key: ::core::option::Option<super::crypto::PublicKey>,
    #[prost(int64, tag="2")]
    pub voting_power: i64,
}
/// PartSetHeader
#[derive(::serde::Deserialize, ::serde::Serialize)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct PartSetHeader {
    #[prost(uint32, tag="1")]
    #[serde(with = "crate::serializers::part_set_header_total")]
    pub total: u32,
    #[prost(bytes="vec", tag="2")]
    #[serde(with = "crate::serializers::bytes::hexstring")]
    pub hash: ::prost::alloc::vec::Vec<u8>,
}
/// One part of a block, with a Merkle proof of its inclusion.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Part {
    #[prost(uint32, tag="1")]
    pub index: u32,
    #[prost(bytes="vec", tag="2")]
    pub bytes: ::prost::alloc::vec::Vec<u8>,
    #[prost(message, optional, tag="3")]
    pub proof: ::core::option::Option<super::crypto::Proof>,
}
/// BlockID
#[derive(::serde::Deserialize, ::serde::Serialize)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BlockId {
    #[prost(bytes="vec", tag="1")]
    #[serde(with = "crate::serializers::bytes::hexstring")]
    pub hash: ::prost::alloc::vec::Vec<u8>,
    #[prost(message, optional, tag="2")]
    #[serde(alias = "parts")]
    pub part_set_header: ::core::option::Option<PartSetHeader>,
}
// --------------------------------
/// Header defines the structure of a Tendermint block header.
#[derive(::serde::Deserialize, ::serde::Serialize)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Header {
    /// basic block info
    #[prost(message, optional, tag="1")]
    pub version: ::core::option::Option<super::version::Consensus>,
    #[prost(string, tag="2")]
    pub chain_id: ::prost::alloc::string::String,
    #[prost(int64, tag="3")]
    #[serde(with = "crate::serializers::from_str")]
    pub height: i64,
    #[prost(message, optional, tag="4")]
    #[serde(with = "crate::serializers::optional")]
    pub time: ::core::option::Option<super::super::google::protobuf::Timestamp>,
    /// prev block info
    #[prost(message, optional, tag="5")]
    pub last_block_id: ::core::option::Option<BlockId>,
    /// hashes of block data
    ///
    /// commit from validators from the last block
    #[prost(bytes="vec", tag="6")]
    #[serde(with = "crate::serializers::bytes::hexstring")]
    pub last_commit_hash: ::prost::alloc::vec::Vec<u8>,
    /// transactions
    #[prost(bytes="vec", tag="7")]
    #[serde(with = "crate::serializers::bytes::hexstring")]
    pub data_hash: ::prost::alloc::vec::Vec<u8>,
    /// hashes from the app output from the prev block
    ///
    /// validators for the current block
    #[prost(bytes="vec", tag="8")]
    #[serde(with = "crate::serializers::bytes::hexstring")]
    pub validators_hash: ::prost::alloc::vec::Vec<u8>,
    /// validators for the next block
    #[prost(bytes="vec", tag="9")]
    #[serde(with = "crate::serializers::bytes::hexstring")]
    pub next_validators_hash: ::prost::alloc::vec::Vec<u8>,
    /// consensus params for current block
    #[prost(bytes="vec", tag="10")]
    #[serde(with = "crate::serializers::bytes::hexstring")]
    pub consensus_hash: ::prost::alloc::vec::Vec<u8>,
    /// state after txs from the previous block
    #[prost(bytes="vec", tag="11")]
    #[serde(with = "crate::serializers::bytes::hexstring")]
    pub app_hash: ::prost::alloc::vec::Vec<u8>,
    /// root hash of all results from the txs from the previous block
    #[prost(bytes="vec", tag="12")]
    #[serde(with = "crate::serializers::bytes::hexstring")]
    pub last_results_hash: ::prost::alloc::vec::Vec<u8>,
    /// consensus info
    ///
    /// evidence included in the block
    #[prost(bytes="vec", tag="13")]
    #[serde(with = "crate::serializers::bytes::hexstring")]
    pub evidence_hash: ::prost::alloc::vec::Vec<u8>,
    /// original proposer of the block
    #[prost(bytes="vec", tag="14")]
    #[serde(with = "crate::serializers::bytes::hexstring")]
    pub proposer_address: ::prost::alloc::vec::Vec<u8>,
}
/// Data contains the set of transactions included in the block
#[derive(::serde::Deserialize, ::serde::Serialize)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Data {
    /// Txs that will be applied by state @ block.Height+1.
    /// NOTE: not all txs here are valid. We're just agreeing on the order first.
    /// This means that block.AppHash does not include these txs.
    #[prost(bytes="vec", repeated, tag="1")]
    #[serde(with = "crate::serializers::txs")]
    pub txs: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec<u8>>,
}
/// Vote represents a prevote, precommit, or commit vote from validators for
/// consensus.
#[derive(::serde::Deserialize, ::serde::Serialize)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Vote {
    #[prost(enumeration="SignedMsgType", tag="1")]
    pub r#type: i32,
    #[prost(int64, tag="2")]
    #[serde(with = "crate::serializers::from_str")]
    pub height: i64,
    #[prost(int32, tag="3")]
    #[serde(with = "crate::serializers::from_str")]
    pub round: i32,
    /// zero if vote is nil.
    #[prost(message, optional, tag="4")]
    pub block_id: ::core::option::Option<BlockId>,
    #[prost(message, optional, tag="5")]
    #[serde(with = "crate::serializers::optional")]
    pub timestamp: ::core::option::Option<super::super::google::protobuf::Timestamp>,
    #[prost(bytes="vec", tag="6")]
    #[serde(with = "crate::serializers::bytes::hexstring")]
    pub validator_address: ::prost::alloc::vec::Vec<u8>,
    #[prost(int32, tag="7")]
    #[serde(with = "crate::serializers::from_str")]
    pub validator_index: i32,
    #[prost(bytes="vec", tag="8")]
    #[serde(with = "crate::serializers::bytes::base64string")]
    pub signature: ::prost::alloc::vec::Vec<u8>,
}
/// Commit contains the evidence that a block was committed by a set of validators.
#[derive(::serde::Deserialize, ::serde::Serialize)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Commit {
    #[prost(int64, tag="1")]
    #[serde(with = "crate::serializers::from_str")]
    pub height: i64,
    #[prost(int32, tag="2")]
    pub round: i32,
    #[prost(message, optional, tag="3")]
    pub block_id: ::core::option::Option<BlockId>,
    #[prost(message, repeated, tag="4")]
    #[serde(with = "crate::serializers::nullable")]
    pub signatures: ::prost::alloc::vec::Vec<CommitSig>,
}
/// CommitSig is a part of the Vote included in a Commit.
#[derive(::serde::Deserialize, ::serde::Serialize)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CommitSig {
    #[prost(enumeration="BlockIdFlag", tag="1")]
    pub block_id_flag: i32,
    #[prost(bytes="vec", tag="2")]
    #[serde(with = "crate::serializers::bytes::hexstring")]
    pub validator_address: ::prost::alloc::vec::Vec<u8>,
    #[prost(message, optional, tag="3")]
    #[serde(with = "crate::serializers::optional")]
    pub timestamp: ::core::option::Option<super::super::google::protobuf::Timestamp>,
    #[prost(bytes="vec", tag="4")]
    #[serde(with = "crate::serializers::bytes::base64string")]
    pub signature: ::prost::alloc::vec::Vec<u8>,
}
/// A signed block proposal for a given height/round.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Proposal {
    #[prost(enumeration="SignedMsgType", tag="1")]
    pub r#type: i32,
    #[prost(int64, tag="2")]
    pub height: i64,
    #[prost(int32, tag="3")]
    pub round: i32,
    #[prost(int32, tag="4")]
    pub pol_round: i32,
    #[prost(message, optional, tag="5")]
    pub block_id: ::core::option::Option<BlockId>,
    #[prost(message, optional, tag="6")]
    pub timestamp: ::core::option::Option<super::super::google::protobuf::Timestamp>,
    #[prost(bytes="vec", tag="7")]
    pub signature: ::prost::alloc::vec::Vec<u8>,
}
/// A block header together with the commit that finalized it.
#[derive(::serde::Deserialize, ::serde::Serialize)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct SignedHeader {
    #[prost(message, optional, tag="1")]
    pub header: ::core::option::Option<Header>,
    #[prost(message, optional, tag="2")]
    pub commit: ::core::option::Option<Commit>,
}
/// A signed header plus the validator set that signed it (light-client input).
#[derive(::serde::Deserialize, ::serde::Serialize)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct LightBlock {
    #[prost(message, optional, tag="1")]
    pub signed_header: ::core::option::Option<SignedHeader>,
    #[prost(message, optional, tag="2")]
    pub validator_set: ::core::option::Option<ValidatorSet>,
}
/// Summary metadata about a stored block.
#[derive(::serde::Deserialize, ::serde::Serialize)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BlockMeta {
    #[prost(message, optional, tag="1")]
    pub block_id: ::core::option::Option<BlockId>,
    #[prost(int64, tag="2")]
    #[serde(with = "crate::serializers::from_str")]
    pub block_size: i64,
    #[prost(message, optional, tag="3")]
    pub header: ::core::option::Option<Header>,
    #[prost(int64, tag="4")]
    #[serde(with = "crate::serializers::from_str")]
    pub num_txs: i64,
}
/// TxProof represents a Merkle proof of the presence of a transaction in the Merkle tree.
#[derive(::serde::Deserialize, ::serde::Serialize)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct TxProof {
    #[prost(bytes="vec", tag="1")]
    #[serde(with = "crate::serializers::bytes::hexstring")]
    pub root_hash: ::prost::alloc::vec::Vec<u8>,
    #[prost(bytes="vec", tag="2")]
    #[serde(with = "crate::serializers::bytes::base64string")]
    pub data: ::prost::alloc::vec::Vec<u8>,
    #[prost(message, optional, tag="3")]
    pub proof: ::core::option::Option<super::crypto::Proof>,
}
/// BlockIdFlag indicates which BlcokID the signature is for
#[derive(::num_derive::FromPrimitive, ::num_derive::ToPrimitive)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum BlockIdFlag {
    Unknown = 0,
    Absent = 1,
    Commit = 2,
    Nil = 3,
}
/// SignedMsgType is a type of signed message in the consensus.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum SignedMsgType {
    Unknown = 0,
    /// Votes
    Prevote = 1,
    Precommit = 2,
    /// Proposals
    Proposal = 32,
}
/// ConsensusParams contains consensus critical parameters that determine the
/// validity of blocks.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ConsensusParams {
#[prost(message, optional, tag="1")]
pub block: ::core::option::Option<BlockParams>,
#[prost(message, optional, tag="2")]
pub evidence: ::core::option::Option<EvidenceParams>,
#[prost(message, optional, tag="3")]
pub validator: ::core::option::Option<ValidatorParams>,
#[prost(message, optional, tag="4")]
pub version: ::core::option::Option<VersionParams>,
}
/// BlockParams contains limits on the block size.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct BlockParams {
/// Max block size, in bytes.
/// Note: must be greater than 0
#[prost(int64, tag="1")]
pub max_bytes: i64,
/// Max gas per block.
/// Note: must be greater or equal to -1
#[prost(int64, tag="2")]
pub max_gas: i64,
}
/// EvidenceParams determine how we handle evidence of malfeasance.
#[derive(::serde::Deserialize, ::serde::Serialize)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct EvidenceParams {
/// Max age of evidence, in blocks.
///
/// The basic formula for calculating this is: MaxAgeDuration / {average block
/// time}.
#[prost(int64, tag="1")]
pub max_age_num_blocks: i64,
/// Max age of evidence, in time.
///
/// It should correspond with an app's "unbonding period" or other similar
/// mechanism for handling [Nothing-At-Stake
/// attacks](<https://github.com/ethereum/wiki/wiki/Proof-of-Stake-FAQ#what-is-the-nothing-at-stake-problem-and-how-can-it-be-fixed>).
#[prost(message, optional, tag="2")]
pub max_age_duration: ::core::option::Option<super::super::google::protobuf::Duration>,
/// This sets the maximum size of total evidence in bytes that can be committed in a single block,
/// and should fall comfortably under the max block bytes.
/// Default is 1048576 or 1MB
#[prost(int64, tag="3")]
#[serde(with = "crate::serializers::from_str", default)]
pub max_bytes: i64,
}
/// ValidatorParams restrict the public key types validators can use.
/// NOTE: uses ABCI pubkey naming, not Amino names.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ValidatorParams {
#[prost(string, repeated, tag="1")]
pub pub_key_types: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
}
/// VersionParams contains the ABCI application version.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct VersionParams {
#[prost(uint64, tag="1")]
pub app_version: u64,
}
/// HashedParams is a subset of ConsensusParams.
///
/// It is hashed into the Header.ConsensusHash.
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct HashedParams {
#[prost(int64, tag="1")]
pub block_max_bytes: i64,
#[prost(int64, tag="2")]
pub block_max_gas: i64,
}
#[derive(::serde::Deserialize, ::serde::Serialize)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CanonicalBlockId {
#[prost(bytes="vec", tag="1")]
pub hash: ::prost::alloc::vec::Vec<u8>,
#[prost(message, optional, tag="2")]
#[serde(alias = "parts")]
pub part_set_header: ::core::option::Option<CanonicalPartSetHeader>,
}
#[derive(::serde::Deserialize, ::serde::Serialize)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CanonicalPartSetHeader {
#[prost(uint32, tag="1")]
pub total: u32,
#[prost(bytes="vec", tag="2")]
pub hash: ::prost::alloc::vec::Vec<u8>,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CanonicalProposal {
/// type alias for byte
#[prost(enumeration="SignedMsgType", tag="1")]
pub r#type: i32,
/// canonicalization requires fixed size encoding here
#[prost(sfixed64, tag="2")]
pub height: i64,
/// canonicalization requires fixed size encoding here
#[prost(sfixed64, tag="3")]
pub round: i64,
#[prost(int64, tag="4")]
pub pol_round: i64,
#[prost(message, optional, tag="5")]
pub block_id: ::core::option::Option<CanonicalBlockId>,
#[prost(message, optional, tag="6")]
pub timestamp: ::core::option::Option<super::super::google::protobuf::Timestamp>,
#[prost(string, tag="7")]
pub chain_id: ::prost::alloc::string::String,
}
#[derive(::serde::Deserialize, ::serde::Serialize)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct CanonicalVote {
/// type alias for byte
#[prost(enumeration="SignedMsgType", tag="1")]
pub r#type: i32,
/// canonicalization requires fixed size encoding here
#[prost(sfixed64, tag="2")]
pub height: i64,
/// canonicalization requires fixed size encoding here
#[prost(sfixed64, tag="3")]
pub round: i64,
#[prost(message, optional, tag="4")]
pub block_id: ::core::option::Option<CanonicalBlockId>,
#[prost(message, optional, tag="5")]
pub timestamp: ::core::option::Option<super::super::google::protobuf::Timestamp>,
#[prost(string, tag="6")]
pub chain_id: ::prost::alloc::string::String,
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct EventDataRoundState {
#[prost(int64, tag="1")]
pub height: i64,
#[prost(int32, tag="2")]
pub round: i32,
#[prost(string, tag="3")]
pub step: ::prost::alloc::string::String,
}
#[derive(::serde::Deserialize, ::serde::Serialize)]
#[serde(from = "crate::serializers::evidence::EvidenceVariant", into = "crate::serializers::evidence::EvidenceVariant")]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Evidence {
#[prost(oneof="evidence::Sum", tags="1, 2")]
pub sum: ::core::option::Option<evidence::Sum>,
}
/// Nested message and enum types in `Evidence`.
pub mod evidence {
#[derive(::serde::Deserialize, ::serde::Serialize)]
#[serde(tag = "type", content = "value")]
#[serde(from = "crate::serializers::evidence::EvidenceVariant", into = "crate::serializers::evidence::EvidenceVariant")]
#[derive(Clone, PartialEq, ::prost::Oneof)]
pub enum Sum {
#[prost(message, tag="1")]
#[serde(rename = "tendermint/DuplicateVoteEvidence")]
DuplicateVoteEvidence(super::DuplicateVoteEvidence),
#[prost(message, tag="2")]
#[serde(rename = "tendermint/LightClientAttackEvidence")]
LightClientAttackEvidence(super::LightClientAttackEvidence),
}
}
/// DuplicateVoteEvidence contains evidence of a validator signed two conflicting votes.
#[derive(::serde::Deserialize, ::serde::Serialize)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct DuplicateVoteEvidence {
#[prost(message, optional, tag="1")]
pub vote_a: ::core::option::Option<Vote>,
#[prost(message, optional, tag="2")]
pub vote_b: ::core::option::Option<Vote>,
#[prost(int64, tag="3")]
pub total_voting_power: i64,
#[prost(int64, tag="4")]
pub validator_power: i64,
#[prost(message, optional, tag="5")]
pub timestamp: ::core::option::Option<super::super::google::protobuf::Timestamp>,
}
/// LightClientAttackEvidence contains evidence of a set of validators attempting to mislead a light client.
#[derive(::serde::Deserialize, ::serde::Serialize)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct LightClientAttackEvidence {
#[prost(message, optional, tag="1")]
pub conflicting_block: ::core::option::Option<LightBlock>,
#[prost(int64, tag="2")]
pub common_height: i64,
#[prost(message, repeated, tag="3")]
pub byzantine_validators: ::prost::alloc::vec::Vec<Validator>,
#[prost(int64, tag="4")]
pub total_voting_power: i64,
#[prost(message, optional, tag="5")]
pub timestamp: ::core::option::Option<super::super::google::protobuf::Timestamp>,
}
#[derive(::serde::Deserialize, ::serde::Serialize)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct EvidenceList {
#[prost(message, repeated, tag="1")]
#[serde(with = "crate::serializers::nullable")]
pub evidence: ::prost::alloc::vec::Vec<Evidence>,
}
#[derive(::serde::Deserialize, ::serde::Serialize)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Block {
#[prost(message, optional, tag="1")]
pub header: ::core::option::Option<Header>,
#[prost(message, optional, tag="2")]
pub data: ::core::option::Option<Data>,
#[prost(message, optional, tag="3")]
pub evidence: ::core::option::Option<EvidenceList>,
#[prost(message, optional, tag="4")]
pub last_commit: ::core::option::Option<Commit>,
}
| 40.104723 | 138 | 0.64861 |
1d807acefe6a359c9dfb0fc890b27eee6810782a
| 893 |
// Copyright 2012-2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// NOTE(review): ancient (pre-1.0) rustc compile-fail test — it uses the removed
// `int` type and the old `asm!(template : outputs : inputs)` constraint syntax.
// The `//~ ERROR` marker is the compiletest annotation naming the diagnostic the
// compiler is expected to emit; this file is MEANT to fail to compile.
#![feature(asm)]
// Consumes the value so the uninitialized read of `x` below must be diagnosed.
fn foo(x: int) { println!("{}", x); }
// Under the historical cfg rules these stacked attributes were OR'ed (the item
// compiles when any one matches) while the comma list in the cfg on the fallback
// main below was AND'ed — the opposite of modern cfg(any(..))/cfg(all(..)).
// TODO(review): confirm against the compiler version this test targeted.
#[cfg(target_arch = "x86")]
#[cfg(target_arch = "x86_64")]
#[cfg(target_arch = "arm")]
pub fn main() {
    let x: int;
    unsafe {
        // `x` is passed as an input operand ("r"(x)) before ever being written.
        asm!("mov $1, $0" : "=r"(x) : "r"(x)); //~ ERROR use of possibly uninitialized variable: `x`
    }
    foo(x);
}
// No-op main for architectures without the inline-asm variant above.
#[cfg(not(target_arch = "x86"), not(target_arch = "x86_64"), not(target_arch = "arm"))]
pub fn main() {}
| 31.892857 | 100 | 0.650616 |
690c2fdb66cb022af00a3b8fc7e28c75372bfe27
| 3,799 |
use crate::util::{print_part_1, print_part_2};
use fancy_regex::Regex;
use md5::{Digest, Md5};
use std::collections::HashSet;
use std::time::Instant;
/// Advent of Code 2016 day 14 one-time-pad search.
///
/// Hashes `salt` plus an incrementing integer suffix (stretched with 2016 extra
/// MD5 rounds when `part == 2`) and records every hash containing a run of three
/// identical hex characters as a candidate key. A candidate is confirmed when a
/// hash within the next `spacing` indices contains a run of five of the same
/// character. Returns the suffix index that produced the `amount`-th key.
fn generate_keys(salt: &str, amount: usize, spacing: usize, part: usize) -> usize {
    // Character of the first run of three identical hex characters, if any.
    // (Factored out: the original duplicated this capture-extraction twice.)
    fn first_triplet(re: &Regex, hash: &str) -> Option<char> {
        re.captures(hash)
            .expect("Error running regex")
            .and_then(|caps| caps.get(1))
            .and_then(|group| group.as_str().chars().next())
    }

    let mut suffix = 0;
    let mut hasher = Md5::new();
    // Backreferences (`\1`) require fancy_regex; these match runs of 3 / 5
    // identical lowercase-hex characters.
    let re_three: Regex = Regex::new(r"([a-z]|[0-9])\1{2}").unwrap();
    let re_five: Regex = Regex::new(r"([a-z]|[0-9])\1{4}").unwrap();
    // Pending candidates: (index, repeated char) of each triplet seen within the
    // last `spacing` hashes, awaiting confirmation by a quintuplet.
    let mut threes = HashSet::new();
    let mut num_valid_keys = 0;
    loop {
        hasher.update(format!("{}{}", salt, suffix));
        let hash = if part == 1 {
            hasher.finalize_reset()
        } else {
            // Part 2: key stretching — re-hash the lowercase hex digest 2016 times.
            let mut current_hash = hasher.finalize_reset();
            for _ in 0..2016 {
                hasher.update(format!("{:x}", current_hash));
                current_hash = hasher.finalize_reset();
            }
            current_hash
        };
        let lowercase_hex_hash = format!("{:x}", hash);
        if re_five.is_match(&lowercase_hex_hash).unwrap() {
            // NOTE: mirrors the original logic — the confirming character is taken
            // from the first *triplet* in the hash (a 5-run always contains a 3-run,
            // but an earlier unrelated 3-run wins if one is present).
            if let Some(kind) = first_triplet(&re_three, &lowercase_hex_hash) {
                let mut to_remove: Vec<(usize, char)> = threes
                    .iter()
                    .copied()
                    .filter(|&(start, three_kind)| kind == three_kind && start + spacing >= suffix)
                    .collect();
                // Keys must be counted in index order.
                to_remove.sort_unstable_by_key(|&(start, _)| start);
                for &(start, three_kind) in to_remove.iter() {
                    num_valid_keys += 1;
                    if num_valid_keys == amount {
                        return start;
                    }
                    threes.remove(&(start, three_kind));
                }
            }
        }
        // don't gate the 3-match if there was a 5-match, 5 counts as a new 3 as well!
        if let Some(kind) = first_triplet(&re_three, &lowercase_hex_hash) {
            threes.insert((suffix, kind));
        }
        suffix += 1;
        // Drop candidates that have aged past the confirmation window.
        threes.retain(|&(start, _)| start + spacing >= suffix);
    }
}
pub fn main() {
let input = "jlmsuwbz";
// PART 1
let start = Instant::now();
let known_answer = "35186";
let part_1: usize = generate_keys(&input, 64, 1000, 1);
let duration = start.elapsed();
print_part_1(&part_1.to_string(), &known_answer, duration);
// PART 2
let start = Instant::now();
let known_answer = "22429";
let part_2: usize = generate_keys(&input, 64, 1000, 2);
let duration = start.elapsed();
print_part_2(&part_2.to_string(), &known_answer, duration);
}
// Test cases commented out because they take a very long time
// #[cfg(test)]
// mod tests {
// use super::*;
// #[test]
// fn test_example_1() {
// let input = "abc";
// let answer: usize = generate_keys(&input, 64, 1000, 1);
// assert_eq!(answer, 22728);
// }
// #[test]
// fn test_example_2() {
// let input = "abc";
// let answer: usize = generate_keys(&input, 64, 1000, 2);
// assert_eq!(answer, 22551);
// }
// }
| 33.034783 | 86 | 0.506712 |
167b361ee98fdbbe00de155d6bd07a9862f0d6e3
| 122 |
pub mod details;
pub mod owners;
pub mod repo;
pub mod versions;
mod extract;
mod format;
mod sources;
mod unsafe_check;
| 12.2 | 17 | 0.762295 |
bf11cf06da4067cec8560758e5f5e9ccfdded588
| 686 |
use std::{
env,
path::PathBuf,
fs::File,
io::Write,
};
/// Build script: emits `consts.rs` into `OUT_DIR` containing the queue buffer
/// size, taken from the `DEFMT_BBQ_BUFFER_SIZE` environment variable and
/// defaulting to 1024 when the variable is absent or non-unicode.
fn main() {
    // Ask cargo to rerun this script whenever the size override changes.
    println!("cargo:rerun-if-env-changed=DEFMT_BBQ_BUFFER_SIZE");

    // Resolve the buffer size; an unparsable value is a hard build error.
    let size: usize = match env::var("DEFMT_BBQ_BUFFER_SIZE") {
        Ok(raw) => raw
            .parse()
            .expect("could not parse DEFMT_BBQ_BUFFER_SIZE as usize"),
        Err(_) => 1024,
    };

    // Write the generated constant next to the other build outputs.
    let dest = PathBuf::from(env::var_os("OUT_DIR").unwrap()).join("consts.rs");
    let mut out_file = File::create(&dest).unwrap();
    write!(out_file, "pub(crate) const BUF_SIZE: usize = {};\n", size).unwrap();
    out_file.flush().unwrap();
}
| 26.384615 | 102 | 0.607872 |
b9fbb9a8dbd4ea5952683a3cf0b57cb5e4114b32
| 1,316 |
use crate::types::{Accounts, NewAccountBalancesPayload};
use log::info;
use reqwest::StatusCode;
pub struct Client {
pub api_host: String,
}
impl Client {
    /// Builds a client that talks to the accounts API at `api_host`.
    #[allow(dead_code)]
    pub fn new(api_host: String) -> Client {
        Client { api_host }
    }

    // TODO
    // redo this client to use connection pooling in reqwest client
    /// Fetches every account from the API. A non-200 reply is surfaced as the
    /// raw status code.
    pub fn get_accounts(self) -> Result<Accounts, StatusCode> {
        let url = format!("{}/accounts/", self.api_host);
        info!("sending reqwest GET {}", &url);
        let response = reqwest::blocking::get(url).unwrap();
        let status = response.status();
        if status == StatusCode::OK {
            Ok(response.json::<Accounts>().unwrap())
        } else {
            Err(status)
        }
    }

    /// Uploads a batch of new account balances. Returns `true` on a 200 reply,
    /// otherwise the status code rendered as a string.
    pub fn submit_account_balances(
        self,
        balances: NewAccountBalancesPayload,
    ) -> Result<bool, String> {
        let http = reqwest::blocking::Client::new();
        let url = format!("{}/accounts/balance/", self.api_host);
        info!("sending reqwest PUT {}", &url);
        let response = http.put(url).json(&balances).send().unwrap();
        match response.status() {
            StatusCode::OK => Ok(true),
            other => Err(other.to_string()),
        }
    }
}
| 26.857143 | 75 | 0.56535 |
9038c5f20e9fdbdc7111c6c99fcd98474cc50e4e
| 1,318 |
#[doc = "Register `RSPR[%s]` reader"]
pub struct R(crate::R<RSPR_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<RSPR_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::convert::From<crate::R<RSPR_SPEC>> for R {
fn from(reader: crate::R<RSPR_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Field `RSP` reader - Response"]
pub struct RSP_R(crate::FieldReader<u32, u32>);
impl RSP_R {
pub(crate) fn new(bits: u32) -> Self {
RSP_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for RSP_R {
type Target = crate::FieldReader<u32, u32>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl R {
#[doc = "Bits 0:31 - Response"]
#[inline(always)]
pub fn rsp(&self) -> RSP_R {
RSP_R::new((self.bits & 0xffff_ffff) as u32)
}
}
#[doc = "Response Register\n\nThis register you can [`read`](crate::generic::Reg::read). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [rspr](index.html) module"]
pub struct RSPR_SPEC;
impl crate::RegisterSpec for RSPR_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [rspr::R](R) reader structure"]
impl crate::Readable for RSPR_SPEC {
type Reader = R;
}
| 29.288889 | 224 | 0.614568 |
e279e6e357eb6f12f0d82e55af289575a488bd1b
| 3,818 |
#![allow(non_snake_case)]
use compose_rt::{compose, Composer, Recomposer};
use fltk::{
app, button,
group::{self, Flex},
prelude::*,
text,
window::Window,
};
use std::{cell::RefCell, rc::Rc};
////////////////////////////////////////////////////////////////////////////
// User application
////////////////////////////////////////////////////////////////////////////
/// A single movie entry rendered by the demo UI.
pub struct Movie {
    pub id: usize,
    pub name: String,
    pub img_url: String,
}

impl Movie {
    /// Creates a movie, accepting anything convertible into a `String` for the
    /// name and image URL.
    pub fn new(id: usize, name: impl Into<String>, img_url: impl Into<String>) -> Self {
        let name = name.into();
        let img_url = img_url.into();
        Self { id, name, img_url }
    }
}
#[compose]
pub fn MoviesScreen(movies: &Vec<Movie>) {
Column(cx, |cx| {
for movie in movies {
cx.tag(movie.id, |cx| MovieOverview(cx, &movie));
}
});
}
/// Composable that renders one movie: its name, a like button, and a counter.
///
/// NOTE: `cx` is not a declared parameter — the `#[compose]` macro injects a
/// `&mut Composer` named `cx` into the function body.
#[compose]
pub fn MovieOverview(movie: &Movie) {
    Column(cx, |cx| {
        Text(cx, &movie.name);
        // `remember` presumably caches the counter across recompositions
        // (Compose-style state) so likes survive re-rendering — confirm against
        // the compose-rt documentation.
        let count = cx.remember(|| Rc::new(RefCell::new(0usize)));
        // Clone of the Rc moved into the button callback below.
        let c = count.clone();
        Button(
            cx,
            &format!("{} get {} likes", movie.name, count.borrow()),
            move || *c.borrow_mut() += 1,
        );
        Text(cx, format!("Count {}", count.borrow()));
    });
}
fn main() {
// Setup logging
env_logger::Builder::from_default_env()
.filter_level(log::LevelFilter::Trace)
.init();
let app = app::App::default();
// define root compose
let root_fn = |cx: &mut Composer, movies| Window(cx, |cx| MoviesScreen(cx, movies));
let mut recomposer = Recomposer::new(20);
let movies = vec![Movie::new(1, "A", "IMG_A"), Movie::new(2, "B", "IMG_B")];
recomposer.compose(|cx| {
root_fn(cx, &movies);
});
while app.wait() {
recomposer.compose(|cx| {
root_fn(cx, &movies);
});
}
}
////////////////////////////////////////////////////////////////////////////
// Components - Usage of compose-rt
////////////////////////////////////////////////////////////////////////////
#[compose(skip_inject_cx = true)]
pub fn Window<C>(cx: &mut Composer, content: C)
where
C: Fn(&mut Composer),
{
cx.group(
|_| Window::default().with_size(400, 300),
|_| false,
content,
|_, _| {},
|win| {
win.end();
win.show();
},
)
}
#[compose(skip_inject_cx = true)]
pub fn Column<C>(cx: &mut Composer, content: C)
where
C: Fn(&mut Composer),
{
cx.group(
|_| {
let mut flex = Flex::new(0, 0, 400, 300, None);
flex.set_type(group::FlexType::Column);
flex
},
|_| false,
content,
|_, _| {},
|flex| {
flex.end();
},
);
}
#[compose(skip_inject_cx = true)]
pub fn Text(cx: &mut Composer, text: impl AsRef<str>) {
let text = text.as_ref();
cx.memo(
|_| {
let mut editor = text::TextEditor::default()
.with_size(390, 290)
.center_of_parent();
let mut buf = text::TextBuffer::default();
buf.set_text(text);
editor.set_buffer(buf);
editor
},
|n| n.buffer().unwrap().text().eq(text),
|n| {
n.buffer().as_mut().unwrap().set_text(text);
},
|_| {},
);
}
#[compose(skip_inject_cx = true)]
pub fn Button<F>(cx: &mut Composer, text: &str, mut cb: F)
where
F: 'static + FnMut(),
{
cx.memo(
|_| {
let mut btn = button::Button::new(160, 210, 80, 40, None);
btn.set_callback(move |_| cb());
btn
},
|n| n.label().eq(text),
|n| {
n.set_label(text);
},
|_| {},
);
}
| 23.8625 | 88 | 0.454426 |
91d5fee4abcce2f36e529fdb51892422030b02ac
| 71 |
pub mod canister_management;
pub mod common;
pub mod init;
pub mod pb;
| 14.2 | 28 | 0.774648 |
c127d67881dfb1edcbf26b90bc84a0f69292a236
| 3,657 |
// Copyright 2021 Conveen
//! Trait and utility functions for implementing profiles.
/// Run an arbitrary command using the provided shell, or Bash by default, and return
/// trimmed stdout and stderr.
///
/// The items of `command` are appended after the shell's `-c` flag, so the first
/// item is the command line for the shell to interpret.
/// NOTE(review): the default shell path `/usr/bin/bash` is hard-coded — on many
/// distributions Bash lives at `/bin/bash`; confirm for the target systems.
pub fn run_command<I, S>(command: I, shell: Option<&str>) -> crate::error::Result<(String, String)>
where
    I: IntoIterator<Item = S>,
    S: AsRef<std::ffi::OsStr>,
{
    let shell = shell.unwrap_or("/usr/bin/bash");
    // Run the command to completion and capture both output streams.
    let output = std::process::Command::new(shell)
        .arg("-c")
        .args(command)
        .output()
        .map_err(crate::error::Error::from)?;
    // Output must be valid UTF-8; surrounding whitespace (e.g. the trailing
    // newline) is trimmed before returning.
    let stdout = std::str::from_utf8(&output.stdout)
        .map_err(crate::error::Error::from)?
        .trim()
        .to_string();
    let stderr = std::str::from_utf8(&output.stderr)
        .map_err(crate::error::Error::from)?
        .trim()
        .to_string();
    Ok((stdout, stderr))
}
/// Status of the first `ethernet` device.
pub fn gen_ethernet_status() -> crate::error::Result<String> {
let (ethernet_status, _) = run_command(
&["nmcli device status | grep ethernet | head -n 1 | awk '{ print $3 }'"],
None,
)?;
Ok(ethernet_status)
}
/// An ethernet cable is connected to the computer.
///
/// The `connecting` status means that an ethernet cable is plugged in but NetworkManager
/// is waiting for configuration, either statically or via DHCP. This function determines
/// whether a cable is connected, and thus returns `true` for `connecting` as well.
pub fn ethernet_is_connected() -> crate::error::Result<bool> {
    let ethernet_status = gen_ethernet_status()?;
    // Compare against string literals directly; the previous
    // `== "...".to_string()` form allocated a String per comparison.
    Ok(ethernet_status == "connected" || ethernet_status == "connecting")
}
/// Status of the first `wifi` device.
pub fn gen_wifi_status() -> crate::error::Result<String> {
let (ethernet_status, _) = run_command(
&["nmcli device status | grep wifi | head -n 1 | awk '{ print $3 }'"],
None,
)?;
Ok(ethernet_status)
}
/// The wireless card is enabled (status is not `unavailable`).
///
/// Useful if waiting for the wireless card to be enabled before connecting to a specific
/// network.
pub fn wifi_is_available() -> crate::error::Result<bool> {
    let wifi_status = gen_wifi_status()?;
    // An empty status means no wifi device was listed at all. Literal comparisons
    // replace the allocating `== "...".to_string()` form.
    Ok(!wifi_status.is_empty() && wifi_status != "unavailable")
}
/// The wireless card is connected to a Wi-Fi network.
///
/// Useful if waiting for Wi-Fi to connect before performing another step (i.e., connecting to
/// a VPN).
pub fn wifi_is_connected() -> crate::error::Result<bool> {
    // Literal comparison avoids the allocation of `"connected".to_string()`.
    Ok(gen_wifi_status()? == "connected")
}
/// Utility function to run a function continuously until it returns `true`.
///
/// Can be used with functions like [wifi_is_connected](fn.wifi_is_connected.html)
/// to wait for a Wi-Fi connection before performing other steps in a profile.
pub fn wait_for<F>(predicate: F, sleep_for: Option<u64>) -> crate::error::Result<()>
where
F: Fn() -> crate::error::Result<bool>,
{
let sleep_for = sleep_for.unwrap_or(1);
while !predicate()? {
std::thread::sleep(std::time::Duration::from_secs(sleep_for));
}
Ok(())
}
/// Network profile.
///
/// All profiles must implement the [up](trait.Profile.html#tymethod.up) and [down](trait.Profile.html#tymethod.down)
/// methods, which determine the commands that get run when applying or removing the profile.
/// The [run_command](fn.run_command.html) method should be used for running
/// arbitary commands in a profile, such as connecting to a VPN or resetting IPv4 settings.
pub trait Profile {
/// Apply network profile.
fn up(&self);
/// Remove network profile.
fn down(&self);
}
| 35.504854 | 117 | 0.665846 |
91c89fc49041ccd818bc23a5019c7867737c4bf8
| 727 |
extern crate structopt;
use std::path::PathBuf;
use structopt::StructOpt;
mod read;
mod printer;
#[derive(StructOpt)]
#[structopt(name = "lsr", about = "Rust implementation of UNIX ls command.")]
struct Opt {
// Include files starting with a dot (.)
#[structopt(short = "a")]
all: bool,
// The directory to list the contents of
#[structopt(parse(from_os_str), default_value = ".")]
path: PathBuf,
}
/// Entry point: parses CLI options, reads the target directory, and prints its
/// contents (honoring the `-a` flag for dotfiles).
fn main() {
    let opts = Opt::from_args();
    // On failure, report to stderr and exit. (Previously the error went to
    // stdout, which polluted piped output; `ls` conventionally uses stderr.)
    let contents = match read::read_dir(opts.path.as_path()) {
        Ok(contents) => contents,
        Err(e) => {
            eprintln!("{}", e);
            return;
        }
    };
    printer::print_contents(contents, opts.all);
}
| 22.030303 | 77 | 0.588721 |
64e94592ba9ace5715fccbfef69e674a5e818667
| 824 |
use derive_more::From;
use game_input_model::GameInputEvent;
use network_session_model::SessionMessageEvent;
use serde::{Deserialize, Serialize};
use session_host_model::SessionHostEvent;
use session_join_model::SessionJoinEvent;
use session_lobby_model::SessionLobbyEvent;
/// All variants of messages that can be sent over the network.
#[derive(Clone, Debug, Deserialize, From, PartialEq, Serialize)]
pub enum NetMessageEvent {
/// `GameInputEvent` messages.
GameInputEvent(GameInputEvent),
/// `SessionHostEvent` messages.
SessionHostEvent(SessionHostEvent),
/// `SessionJoinEvent` messages.
SessionJoinEvent(SessionJoinEvent),
/// `SessionLobbyEvent` messages.
SessionLobbyEvent(SessionLobbyEvent),
/// `SessionMessageEvent` messages.
SessionMessageEvent(SessionMessageEvent),
}
| 35.826087 | 64 | 0.776699 |
507513e1d9f51a5ef317670196e787747831f84b
| 2,375 |
use std::io::{Read, Result, Seek, SeekFrom};

/// Blocking callback that fills the given slice with bytes starting at the
/// given absolute offset.
type FnAPI = Box<dyn Fn(usize, &mut [u8]) -> Result<()>>;

/// Implements `Read + Seek` for a (blocking) function that reads ranges of bytes.
///
/// # Implementation
/// Keeps an internal `Vec<u8>` ring that batches calls to the range function:
/// whole chunks are prefetched, then handed out piecewise to `read` calls.
pub struct RangedReader {
    cursor: u64,     // absolute offset of the next byte to *fetch* from `fetch`
    total_len: u64,  // total size of the underlying source
    ring: Vec<u8>,   // prefetch buffer; ring[..consumed] has been handed out
    consumed: usize, // how many bytes at the front of `ring` were already read
    fetch: FnAPI,
}

impl RangedReader {
    /// Creates a new [`RangedReader`]; `buffer`'s capacity sets the prefetch
    /// granularity and its prior contents are discarded.
    pub fn new(length: usize, range_fn: FnAPI, mut buffer: Vec<u8>) -> Self {
        buffer.clear();
        Self {
            cursor: 0,
            total_len: length as u64,
            ring: buffer,
            consumed: 0,
            fetch: range_fn,
        }
    }

    /// Ensures at least `needed` unread bytes sit in the ring, fetching a fresh
    /// chunk from the range function when they do not.
    fn read_more(&mut self, needed: usize) -> Result<()> {
        let unread = self.ring.len() - self.consumed;
        if needed < unread {
            return Ok(());
        }
        // Fetch enough to satisfy the request, but never less than what was
        // already consumed or the ring's capacity, to amortize range calls.
        let fetch_len =
            std::cmp::max(std::cmp::max(self.consumed, needed), self.ring.capacity()) - unread;
        // Slide the unread tail to the front, then grow for the incoming bytes.
        self.ring.rotate_left(self.consumed);
        self.ring.resize(unread + fetch_len, 0);
        (self.fetch)(self.cursor as usize, &mut self.ring[unread..])?;
        self.cursor += fetch_len as u64;
        self.consumed = 0;
        Ok(())
    }
}

impl Read for RangedReader {
    fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
        let wanted = buf.len();
        // Make sure the ring holds `wanted` unread bytes, then hand them out.
        self.read_more(wanted)?;
        buf[..wanted].copy_from_slice(&self.ring[self.consumed..self.consumed + wanted]);
        self.consumed += wanted;
        Ok(wanted)
    }
}

impl Seek for RangedReader {
    fn seek(&mut self, pos: SeekFrom) -> Result<u64> {
        // NOTE(review): `SeekFrom::Current` is relative to the prefetch cursor,
        // not to the bytes actually handed to the caller — confirm callers only
        // seek absolutely (Start/End) before relying on Current.
        self.cursor = match pos {
            SeekFrom::Start(offset) => offset,
            SeekFrom::End(offset) => (self.total_len as i64 + offset) as u64,
            SeekFrom::Current(offset) => (self.cursor as i64 + offset) as u64,
        };
        // todo: optimize: do not clear buffer and instead check whether we can just move the offset.
        self.consumed = 0;
        self.ring.clear();
        Ok(self.cursor)
    }
}
5d630dc41f143a0dfd62749d6382ae3a1c4495ea
| 771 |
//! Types used throughout.
//!
//! > Schema defines the plain old data types that views operate on. Notably, the schema module has
//! > no knowledge of the database, nor any dependencies on any of the rest of the system.
use serde_derive::Serialize;
use uuid::Uuid;
/// A team.
#[derive(Clone, Debug, Queryable, Serialize)]
pub struct Team {
    /// The team's database ID.
    pub id: Uuid,
    /// The team's name.
    pub name: String,
}
/// A user.
///
/// NOTE(review): `Queryable` presumably maps result columns to fields by
/// position — keep the field order in sync with the backing query; confirm
/// against the diesel documentation.
#[derive(Clone, Debug, Queryable, Serialize)]
pub struct User {
    /// The user's database ID. Excluded from serialized output via `serde(skip)`.
    #[serde(skip)]
    pub id: i32,
    /// The user's name.
    pub name: String,
    /// The user's email address.
    pub email: String,
    /// The database ID of the user's team; presumably `None` until the user
    /// joins a team — confirm against the writing side.
    pub team: Option<Uuid>,
}
| 22.028571 | 99 | 0.636835 |
e4bb8f1f73fcf8f77793c4e3c020fe4330bc80d1
| 6,796 |
use crate::rita_common::dashboard::Dashboard;
use crate::rita_common::debt_keeper::{DebtKeeper, Dump, NodeDebtData};
use crate::rita_common::tunnel_manager::{GetNeighbors, Neighbor, TunnelManager};
use crate::SETTING;
use ::actix::{Handler, Message, ResponseFuture, SystemService};
use ::actix_web::AsyncResponder;
use ::actix_web::{HttpRequest, Json};
use althea_types::Identity;
use arrayvec::ArrayString;
use babel_monitor::open_babel_stream;
use babel_monitor::Babel;
use failure::Error;
use futures::Future;
use num256::{Int256, Uint256};
use settings::client::RitaClientSettings;
use settings::RitaCommonSettings;
use std::boxed::Box;
use std::collections::HashMap;
#[derive(Serialize)]
pub struct NodeInfo {
pub nickname: String,
pub ip: String,
pub route_metric_to_exit: u16,
pub total_payments: Uint256,
pub debt: Int256,
pub link_cost: u16,
pub price_to_exit: u32,
}
pub struct GetNeighborInfo;
impl Message for GetNeighborInfo {
type Result = Result<Vec<NodeInfo>, Error>;
}
/// Gets info about neighbors, including interested data about what their route
/// price is to the exit and how much we may owe them. The debt data is now legacy
/// since the /debts endpoint was introduced, and should be removed when it can be
/// coordinated with the frontend.
/// The routes info might also belong in /exits or a dedicated /routes endpoint
impl Handler<GetNeighborInfo> for Dashboard {
    type Result = ResponseFuture<Vec<NodeInfo>, Error>;

    // Fans out to DebtKeeper and TunnelManager, then enriches every neighbor
    // with routing data pulled from a single fresh babel route-table snapshot.
    fn handle(&mut self, _msg: GetNeighborInfo, _ctx: &mut Self::Context) -> Self::Result {
        Box::new(
            DebtKeeper::from_registry()
                .send(Dump {})
                .from_err()
                .and_then(|debts| {
                    TunnelManager::from_registry()
                        .send(GetNeighbors {})
                        .from_err()
                        .and_then(|neighbors| {
                            let mut debts = debts?;
                            // Seed zeroed debt entries for neighbors that have no
                            // debt data yet; a failed neighbor lookup is tolerated
                            // and the debts list is used as-is.
                            if neighbors.is_ok() {
                                let neighbors = neighbors?;
                                merge_debts_and_neighbors(neighbors, &mut debts);
                            }
                            // One babel connection and one parsed route table serve
                            // the whole response.
                            let stream = open_babel_stream(SETTING.get_network().babel_port)?;
                            let mut babel = Babel::new(stream);
                            babel.start_connection()?;
                            let route_table_sample = babel.parse_routes()?;
                            let mut output = Vec::new();
                            let exit_client = SETTING.get_exit_client();
                            let current_exit = exit_client.get_current_exit();
                            for (identity, debt_info) in debts.iter() {
                                let nickname = match identity.nickname {
                                    Some(val) => val,
                                    None => ArrayString::<[u8; 32]>::from("No Nickname").unwrap(),
                                };
                                // NOTE(review): `is_some()` + `unwrap()` below could
                                // be `if let Some(exit) = current_exit`.
                                if current_exit.is_some() {
                                    let exit_ip = current_exit.unwrap().id.mesh_ip;
                                    let maybe_route = babel.get_route_via_neigh(
                                        identity.mesh_ip,
                                        exit_ip,
                                        &route_table_sample,
                                    );
                                    // We have a peer that is an exit, so we can't find a route
                                    // from them to our selected exit. Other errors can also get
                                    // caught here
                                    if maybe_route.is_err() {
                                        output.push(nonviable_node_info(
                                            nickname,
                                            identity.mesh_ip.to_string(),
                                        ));
                                        continue;
                                    }
                                    // we check that this is safe above
                                    let route = maybe_route.unwrap();
                                    output.push(NodeInfo {
                                        nickname: nickname.to_string(),
                                        ip: serde_json::to_string(&identity.mesh_ip).unwrap(),
                                        route_metric_to_exit: route.metric,
                                        total_payments: debt_info.total_payment_received.clone(),
                                        debt: debt_info.debt.clone(),
                                        link_cost: route.refmetric,
                                        price_to_exit: route.price,
                                    })
                                } else {
                                    // No exit selected: emit sentinel max values so
                                    // the row reads as unreachable.
                                    output.push(NodeInfo {
                                        nickname: nickname.to_string(),
                                        ip: serde_json::to_string(&identity.mesh_ip).unwrap(),
                                        route_metric_to_exit: u16::max_value(),
                                        total_payments: debt_info.total_payment_received.clone(),
                                        debt: debt_info.debt.clone(),
                                        link_cost: u16::max_value(),
                                        price_to_exit: u32::max_value(),
                                    })
                                }
                            }
                            Ok(output)
                        })
                }),
        )
    }
}
pub fn get_neighbor_info(
_req: HttpRequest,
) -> Box<dyn Future<Item = Json<Vec<NodeInfo>>, Error = Error>> {
debug!("Neighbors endpoint hit!");
Dashboard::from_registry()
.send(GetNeighborInfo {})
.from_err()
.and_then(move |reply| Ok(Json(reply?)))
.responder()
}
/// Extends `debts` so that every neighbor in `neighbors` has an entry,
/// inserting zeroed debt data for any neighbor that was missing.
fn merge_debts_and_neighbors(
    neighbors: Vec<Neighbor>,
    debts: &mut HashMap<Identity, NodeDebtData>,
) {
    neighbors.into_iter().for_each(|neighbor| {
        debts
            .entry(neighbor.identity.global)
            .or_insert_with(NodeDebtData::new);
    });
}
/// Builds a placeholder `NodeInfo` row for a neighbor we cannot route through:
/// zeroed financial fields and a maxed-out route metric.
fn nonviable_node_info(nickname: ArrayString<[u8; 32]>, ip: String) -> NodeInfo {
    let nickname = nickname.to_string();
    NodeInfo {
        nickname,
        ip,
        route_metric_to_exit: u16::max_value(),
        total_payments: 0u32.into(),
        debt: 0.into(),
        link_cost: 0,
        price_to_exit: 0,
    }
}
| 41.950617 | 98 | 0.480724 |
b9d90795110aa3a5045493c9bd21d28c03e86a23
| 7,288 |
use crate::prelude::*;
use crate::responses::*;
use azure_sdk_core::errors::{check_status_extract_headers_and_body, AzureError};
use azure_sdk_core::prelude::*;
use azure_sdk_storage_core::prelude::*;
use hyper::StatusCode;
use std::convert::TryInto;
#[derive(Debug, Clone)]
pub struct ListQueuesBuilder<'a, 'b, C>
where
C: Client,
{
queue_service: &'a dyn QueueService<StorageClient = C>,
prefix: Option<&'b str>,
next_marker: Option<&'b str>,
max_results: Option<u32>,
include_metadata: bool,
timeout: Option<u64>,
client_request_id: Option<&'a str>,
}
impl<'a, 'b, C> ListQueuesBuilder<'a, 'b, C>
where
C: Client,
{
#[inline]
pub(crate) fn new(
queue_service: &'a dyn QueueService<StorageClient = C>,
) -> ListQueuesBuilder<'a, 'b, C> {
ListQueuesBuilder {
queue_service,
prefix: None,
next_marker: None,
max_results: None,
include_metadata: false,
timeout: None,
client_request_id: None,
}
}
}
// Accessor trait impls: expose each optional parameter so the shared
// `to_uri_parameter` / `add_header` machinery in azure_sdk_core can read it.
impl<'a, 'b, C> PrefixOption<'b> for ListQueuesBuilder<'a, 'b, C>
where
    C: Client,
{
    #[inline]
    fn prefix(&self) -> Option<&'b str> {
        self.prefix
    }
}
impl<'a, 'b, C> NextMarkerOption<'b> for ListQueuesBuilder<'a, 'b, C>
where
    C: Client,
{
    #[inline]
    fn next_marker(&self) -> Option<&'b str> {
        self.next_marker
    }
}
impl<'a, 'b, C> MaxResultsOption for ListQueuesBuilder<'a, 'b, C>
where
    C: Client,
{
    #[inline]
    fn max_results(&self) -> Option<u32> {
        self.max_results
    }
}
impl<'a, 'b, C> IncludeMetadataOption for ListQueuesBuilder<'a, 'b, C>
where
    C: Client,
{
    #[inline]
    fn include_metadata(&self) -> bool {
        self.include_metadata
    }
}
impl<'a, 'b, C> TimeoutOption for ListQueuesBuilder<'a, 'b, C>
where
    C: Client,
{
    #[inline]
    fn timeout(&self) -> Option<u64> {
        self.timeout
    }
}
impl<'a, 'b, C> ClientRequestIdOption<'a> for ListQueuesBuilder<'a, 'b, C>
where
    C: Client,
{
    #[inline]
    fn client_request_id(&self) -> Option<&'a str> {
        self.client_request_id
    }
}
// Fluent setters: each consumes the builder and returns a copy with exactly
// one field updated, using struct-update syntax to carry the rest over.
impl<'a, 'b, C> PrefixSupport<'b> for ListQueuesBuilder<'a, 'b, C>
where
    C: Client,
{
    type O = ListQueuesBuilder<'a, 'b, C>;

    /// Sets the queue-name prefix filter.
    #[inline]
    fn with_prefix(self, prefix: &'b str) -> Self::O {
        ListQueuesBuilder {
            prefix: Some(prefix),
            ..self
        }
    }
}
impl<'a, 'b, C> NextMarkerSupport<'b> for ListQueuesBuilder<'a, 'b, C>
where
    C: Client,
{
    type O = ListQueuesBuilder<'a, 'b, C>;

    /// Sets the continuation marker from a previous listing.
    #[inline]
    fn with_next_marker(self, next_marker: &'b str) -> Self::O {
        ListQueuesBuilder {
            next_marker: Some(next_marker),
            ..self
        }
    }
}
impl<'a, 'b, C> MaxResultsSupport for ListQueuesBuilder<'a, 'b, C>
where
    C: Client,
{
    type O = ListQueuesBuilder<'a, 'b, C>;

    /// Sets the maximum number of queues to return.
    #[inline]
    fn with_max_results(self, max_results: u32) -> Self::O {
        ListQueuesBuilder {
            max_results: Some(max_results),
            ..self
        }
    }
}
impl<'a, 'b, C> IncludeMetadataSupport for ListQueuesBuilder<'a, 'b, C>
where
    C: Client,
{
    type O = ListQueuesBuilder<'a, 'b, C>;

    /// Requests that queue metadata be included in the response.
    #[inline]
    fn with_include_metadata(self) -> Self::O {
        ListQueuesBuilder {
            include_metadata: true,
            ..self
        }
    }
}
impl<'a, 'b, C> TimeoutSupport for ListQueuesBuilder<'a, 'b, C>
where
    C: Client,
{
    type O = ListQueuesBuilder<'a, 'b, C>;

    /// Sets the server-side operation timeout.
    #[inline]
    fn with_timeout(self, timeout: u64) -> Self::O {
        ListQueuesBuilder {
            timeout: Some(timeout),
            ..self
        }
    }
}
impl<'a, 'b, C> ClientRequestIdSupport<'a> for ListQueuesBuilder<'a, 'b, C>
where
    C: Client,
{
    type O = ListQueuesBuilder<'a, 'b, C>;

    /// Sets the client-supplied request id for tracing.
    #[inline]
    fn with_client_request_id(self, client_request_id: &'a str) -> Self::O {
        ListQueuesBuilder {
            client_request_id: Some(client_request_id),
            ..self
        }
    }
}
// methods callable regardless
impl<'a, 'b, C> ListQueuesBuilder<'a, 'b, C>
where
    C: Client,
{
    /// Returns the queue service this builder will issue its request through.
    pub fn queue_service(&self) -> &'a dyn QueueService<StorageClient = C> {
        self.queue_service
    }
}
// methods callable only when every mandatory field has been filled
impl<'a, 'b, C> ListQueuesBuilder<'a, 'b, C>
where
    C: Client,
{
    /// Performs the List Queues request and converts the response headers
    /// and body into a `ListQueuesResponse`.
    pub async fn execute(self) -> Result<ListQueuesResponse, AzureError> {
        let mut uri = format!(
            "{}?comp=list",
            self.queue_service.storage_client().queue_uri()
        );

        // Append every optional query parameter that was supplied, keeping
        // the same ordering as before.
        let optional_params = [
            IncludeMetadataOption::to_uri_parameter(&self),
            TimeoutOption::to_uri_parameter(&self),
            MaxResultsOption::to_uri_parameter(&self),
            NextMarkerOption::to_uri_parameter(&self),
            PrefixOption::to_uri_parameter(&self),
        ];
        for param in optional_params.iter().flatten() {
            uri = format!("{}&{}", uri, param);
        }

        debug!("uri == {}", uri);

        let future_response = self.queue_service.storage_client().perform_request(
            &uri,
            &http::Method::GET,
            &|mut request| {
                // The client request id travels as a header, not a query parameter.
                request = ClientRequestIdOption::add_header(&self, request);
                request
            },
            Some(&[]),
        )?;

        let (headers, body) =
            check_status_extract_headers_and_body(future_response, StatusCode::OK).await?;
        (&headers, &body as &[u8]).try_into()
    }
}
| 26.121864 | 90 | 0.582602 |
0e4b6b381185197ea1305405e5b81f2812d25d0d
| 1,193 |
// Licensed to Elasticsearch B.V under
// one or more agreements.
// Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
// See the LICENSE file in the project root for more information
use crate::ffi::*;
use com::{
interfaces::iunknown::IUnknown,
sys::{FAILED, HRESULT},
};
// COM interface binding for ICorProfilerFunctionControl (CLR profiling API).
// NOTE(review): the IID and the vtable (method) order must match the COM
// definition exactly; do not reorder methods. Plain `//` comments are used
// inside the macro because doc-comment tokens would be fed to the proc-macro.
interfaces! {
    #[uuid("F0963021-E1EA-4732-8581-E01B0BD3C0C6")]
    pub unsafe interface ICorProfilerFunctionControl: IUnknown {
        // Sets code-generation flags for the function being recompiled.
        pub fn SetCodegenFlags(&self, flags: DWORD) -> HRESULT;
        // Replaces the IL method body (header + body bytes, length-prefixed).
        pub fn SetILFunctionBody(
            &self,
            cbNewILMethodHeader: ULONG,
            pbNewILMethodHeader: LPCBYTE,
        ) -> HRESULT;
        // Supplies an IL instrumented code map (array of COR_IL_MAP entries).
        pub fn SetILInstrumentedCodeMap(
            &self,
            cILMapEntries: ULONG,
            rgILMapEntries: *const COR_IL_MAP,
        ) -> HRESULT;
    }
}
impl ICorProfilerFunctionControl {
    /// Convenience wrapper around `SetILFunctionBody`: passes the slice's
    /// length and pointer to the raw COM call and translates the returned
    /// HRESULT into a `Result`.
    pub fn set_il_function_body(&self, new_method: &[u8]) -> Result<(), HRESULT> {
        let hr = unsafe {
            self.SetILFunctionBody(new_method.len() as ULONG, new_method.as_ptr())
        };
        // A failing HRESULT becomes Err; anything else is success.
        match FAILED(hr) {
            true => Err(hr),
            false => Ok(()),
        }
    }
}
| 29.097561 | 82 | 0.603521 |
0896cc74122920823b4127012f6fd7de26b1ee06
| 13,988 |
use libc::{ c_int, c_uint, size_t, c_char, c_uchar, c_void, pthread_t, mqd_t };
use dlt_types::{ DltReturnValue, DltLogLevelType, DltFormatType, DltNetworkTraceType, DltUserLogMode, DltTraceStatusType };
use dlt_common::{ DltBuffer, DltReceiver, DLT_ID_SIZE };
/// Maximum size of each user buffer, also used for injection buffer
pub const DLT_USER_BUF_MAX_SIZE: usize = 1390;
/// Size of resend buffer; Max DLT message size is 1390 bytes plus some extra header space
pub const DLT_USER_RESENDBUF_MAX_SIZE: usize = DLT_USER_BUF_MAX_SIZE + 100;
/// This structure is used for every context used in an application
// NOTE(review): #[repr(C)] — field order and types must mirror the C-side
// definition exactly; do not reorder or change field types.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct DltContext {
    /// Context ID
    pub contextID: [c_char; DLT_ID_SIZE],
    /// Offset in user-application context field
    pub log_level_pos: i32,
    /// Pointer to the log level
    pub log_level_ptr: *mut i8,
    /// Pointer to the trace status
    pub trace_status_ptr: *mut i8,
    /// Message counter
    pub mcnt: u8
}
/// This structure is used for context data used in an application
#[repr(C)]
pub struct DltContextData {
    /// Pointer to DltContext
    pub handle: *mut DltContext,
    /// Buffer for building log message
    pub buffer: [c_uchar; DLT_USER_BUF_MAX_SIZE],
    /// Payload size
    pub size: i32,
    /// Log level
    pub log_level: i32,
    /// Trace status
    pub trace_status: i32,
    /// Number of arguments for extended header
    pub args_num: i32,
    /// Description of context
    pub context_description: *mut c_char
}
/// Pairs a service id with its injection callback (see
/// [`dlt_register_injection_callback`]).
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct DltUserInjectionCallback {
    /// Service ID the callback is registered for
    pub service_id: u32,
    /// C callback invoked for the service; `Option` because the pointer may be null
    pub injection_callback: Option<unsafe extern fn(service_id: u32, data: *mut c_void, length: u32) -> c_int>
}
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct DltUserLogLevelChangedCallback {
    /// Context ID
    pub contextID: [c_char; DLT_ID_SIZE],
    /// Log level
    pub log_level: i8,
    /// Trace status
    pub trace_status: i8,
    /// C callback invoked on log-level change; nullable, hence `Option`
    pub log_level_changed_callback: Option<unsafe extern fn(context_id: *mut c_char, log_level: u8, trace_status: u8)>
}
/// This structure is used in a table managing all contexts and the corresponding log levels in an application
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct dlt_ll_ts_type {
    /// Context ID
    pub contextID: [c_char; DLT_ID_SIZE],
    /// Log level
    pub log_level: i8,
    /// Pointer to the log level
    pub log_level_ptr: *mut i8,
    /// Trace status
    pub trace_status: i8,
    /// Pointer to the trace status
    pub trace_status_ptr: *mut i8,
    /// Description of context
    pub context_description: *mut c_char,
    /// Table with pointers to injection functions and service IDs
    pub injection_table: *mut DltUserInjectionCallback,
    // presumably the number of entries in `injection_table` — confirm against the C header
    pub nrcallbacks: u32,
    /// Log level changed callback
    pub log_level_changed_callback: Option<unsafe extern fn(context_id: *mut c_char, log_level: u8, trace_status: u8)>
}
/// This structure holds initial log-level for given appId:ctxId pair
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct dlt_env_ll_item {
    /// Application ID
    pub appId: [c_char; DLT_ID_SIZE],
    /// Context ID
    pub ctxId: [c_char; DLT_ID_SIZE],
    /// Initial log level for the pair
    pub ll: i8
}
/// This structure holds all initial log-levels given via environment variable DLT_INITIAL_LOG_LEVEL
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct dlt_env_ll_set {
    /// Pointer to the first element of the item array
    pub item: *mut dlt_env_ll_item,
    // presumably allocated capacity vs. used count — confirm against the C header
    pub array_size: size_t,
    pub num_elem: size_t
}
/// This structure is used once for one application.
// NOTE(review): #[repr(C)] — must mirror the C-side DltUser layout exactly,
// including the fields that are #ifdef'd out below; do not reorder.
#[repr(C)]
pub struct DltUser {
    /// ECU ID
    pub ecuID: [c_char; DLT_ID_SIZE],
    /// Application ID
    pub appID: [c_char; DLT_ID_SIZE],
    /// Handle to FIFO of DLT daemon
    pub dlt_log_handle: c_int,
    /// Handle to own FIFO
    pub dlt_user_handle: c_int,
    /// Handle message queue
    pub dlt_segmented_queue_read_handle: mqd_t,
    /// Handle message queue
    pub dlt_segmented_queue_write_handle: mqd_t,
    /// Thread handle of segmented sending
    pub dlt_segmented_nwt_handle: pthread_t,
    /// Target of logging: 1 to file, 0 to daemon
    pub dlt_is_file: i8,
    /// [MAX_DLT_LL_TS_ENTRIES]; < Internal management struct for all
    pub dlt_ll_ts: *mut dlt_ll_ts_type,
    /// Maximum number of contexts
    pub dlt_ll_ts_max_num_entries: u32,
    /// Number of used contexts
    pub dlt_ll_ts_num_entries: u32,
    /// Overflow marker, set to 1 on overflow, 0 otherwise
    pub overflow: i8,
    /// Counts the number of lost messages
    pub overflow_counter: u32,
    /// Description of application
    pub application_description: *mut c_char,
    /// Receiver for internal user-defined messages from daemon
    pub receiver: DltReceiver,
    /// Verbose mode enabled: 1 enabled, 0 disabled
    pub verbose_mode: i8,
    /// Use extended header for non verbose: 1 enabled, 0 disabled
    pub use_extende_header_for_non_verbose: i8,
    /// Send always session ID: 1 enabled, 0 disabled
    pub with_session_id: i8,
    /// Send always timestamp: 1 enabled, 0 disabled
    pub with_timestamp: i8,
    /// Send always ecu ID: 1 enabled, 0 disabled
    pub with_ecu_id: i8,
    /// Local printing of log messages: 1 enabled, 0 disabled
    pub enable_local_print: i8,
    /// Local print mode, controlled by environment variable
    pub local_print_mode: i8,
    /// Log state of external connection:
    ///
    /// `1` - client connected
    ///
    /// `0` - no connected
    ///
    /// `-1` unknown
    pub log_state: i8,
    /// Ring-buffer for buffering messages during startup and missing connection
    pub startup_buffer: DltBuffer,
    /// Buffer used for resending, locked by DLT semaphore
    pub resend_buffer: [u8; DLT_USER_RESENDBUF_MAX_SIZE],
    /// Timeout used in dlt_user_atexit_blow_out_user_buffer, in 0.1 milliseconds
    pub timeout_at_exit_handler: u32,
    /// Initial log-levels parsed from the DLT_INITIAL_LOG_LEVEL environment variable
    pub initial_ll_set: dlt_env_ll_set,
    // #ifdef DLT_SHM_ENABLE
    // pub dlt_shm: DltShm,
    // #endif
    // #ifdef DLT_TEST_ENABLE
    // pub corrupt_user_header: c_int,
    // pub corrupt_message_size: c_int,
    // pub corrupt_message_size_size: i16,
    // #endif
}
// Raw FFI declarations into the DLT user library. Signatures mirror the C
// API one-to-one; all safety obligations (valid pointers, correct lengths)
// rest on the caller.
extern "C" {
    // Low-level message construction: start/finish a log entry and append
    // typed arguments to it.
    pub fn dlt_user_log_write_start(handle: *mut DltContext, log: *mut DltContextData, loglevel: DltLogLevelType) -> DltReturnValue;
    pub fn dlt_user_log_write_start_id(handle: *mut DltContext, log: *mut DltContextData, loglevel: DltLogLevelType, messageid: u32) -> DltReturnValue;
    pub fn dlt_user_log_write_finish(log: *mut DltContextData) -> DltReturnValue;
    pub fn dlt_user_log_write_bool(log: *mut DltContextData, data: u8) -> DltReturnValue;
    pub fn dlt_user_log_write_float32(log: *mut DltContextData, data: f32) -> DltReturnValue;
    pub fn dlt_user_log_write_float64(log: *mut DltContextData, data: f64) -> DltReturnValue;
    pub fn dlt_user_log_write_uint(log: *mut DltContextData, data: c_uint) -> DltReturnValue;
    pub fn dlt_user_log_write_uint8(log: *mut DltContextData, data: u8) -> DltReturnValue;
    pub fn dlt_user_log_write_uint16(log: *mut DltContextData, data: u16) -> DltReturnValue;
    pub fn dlt_user_log_write_uint32(log: *mut DltContextData, data: u32) -> DltReturnValue;
    pub fn dlt_user_log_write_uint64(log: *mut DltContextData, data: u64) -> DltReturnValue;
    pub fn dlt_user_log_write_uint8_formatted(log: *mut DltContextData, data: u8, _type: DltFormatType) -> DltReturnValue;
    pub fn dlt_user_log_write_uint16_formatted(log: *mut DltContextData, data: u16, _type: DltFormatType) -> DltReturnValue;
    pub fn dlt_user_log_write_uint32_formatted(log: *mut DltContextData, data: u32, _type: DltFormatType) -> DltReturnValue;
    pub fn dlt_user_log_write_uint64_formatted(log: *mut DltContextData, data: u64, _type: DltFormatType) -> DltReturnValue;
    pub fn dlt_user_log_write_ptr(log: *mut DltContextData, data: *mut c_void) -> DltReturnValue;
    pub fn dlt_user_log_write_int(log: *mut DltContextData, data: c_int) -> DltReturnValue;
    pub fn dlt_user_log_write_int8(log: *mut DltContextData, data: i8) -> DltReturnValue;
    pub fn dlt_user_log_write_int16(log: *mut DltContextData, data: i16) -> DltReturnValue;
    pub fn dlt_user_log_write_int32(log: *mut DltContextData, data: i32) -> DltReturnValue;
    pub fn dlt_user_log_write_int64(log: *mut DltContextData, data: i64) -> DltReturnValue;
    pub fn dlt_user_log_write_string(log: *mut DltContextData, text: *const c_char) -> DltReturnValue;
    pub fn dlt_user_log_write_constant_string(log: *mut DltContextData, text: *const c_char) -> DltReturnValue;
    pub fn dlt_user_log_write_utf8_string(log: *mut DltContextData, text: *const c_char) -> DltReturnValue;
    pub fn dlt_user_log_write_raw(log: *mut DltContextData, data: *mut c_void, length: u16) -> DltReturnValue;
    pub fn dlt_user_log_write_raw_formatted(log: *mut DltContextData, data: *mut c_void, length: u16, _type: DltFormatType) -> DltReturnValue;
    // Network trace variants.
    pub fn dlt_user_trace_network(handle: *mut DltContext, nw_trace_type: DltNetworkTraceType, header_len: u16, header: *mut c_void, payload_len: u16, payload: *mut c_void) -> DltReturnValue;
    pub fn dlt_user_trace_network_truncated(handle: *mut DltContext, nw_trace_type: DltNetworkTraceType, header_len: u16, header: *mut c_void, payload_len: u16, payload: *mut c_void, allow_truncate: c_int) -> DltReturnValue;
    pub fn dlt_user_trace_network_segmented(handle: *mut DltContext, nw_trace_type: DltNetworkTraceType, header_len: u16, header: *mut c_void, payload_len: u16, payload: *mut c_void) -> DltReturnValue;
    //// The following API functions define a high level function interface for DLT
    pub fn dlt_init() -> DltReturnValue;
    pub fn dlt_init_file(name: *const c_char) -> DltReturnValue;
    pub fn dlt_free() -> DltReturnValue;
    pub fn dlt_check_library_version(user_major_version: *const c_char, user_minor_version: *const c_char) -> DltReturnValue;
    pub fn dlt_register_app(appid: *const c_char, description: *const c_char) -> DltReturnValue;
    pub fn dlt_unregister_app() -> DltReturnValue;
    pub fn dlt_register_context(handle: *mut DltContext, contextid: *const c_char, description: *const c_char) -> DltReturnValue;
    pub fn dlt_register_context_ll_ts(handle: *mut DltContext, contextid: *const c_char, description: *const c_char, loglevel: c_int, tracestatus: c_int) -> DltReturnValue;
    pub fn dlt_unregister_context(handle: *mut DltContext) -> DltReturnValue;
    pub fn dlt_set_resend_timeout_atexit(timeout_in_milliseconds: u32) -> c_int;
    pub fn dlt_set_log_mode(mode: DltUserLogMode) -> DltReturnValue;
    pub fn dlt_get_log_state() -> c_int;
    pub fn dlt_register_injection_callback(handle: *mut DltContext, service_id: u32, dlt_injection_callback: Option<unsafe extern fn (service_id: u32, data: *mut c_void, length: u32) -> c_int>) -> DltReturnValue;
    pub fn dlt_register_log_level_changed_callback(handle: *mut DltContext, dlt_log_level_changed_callback: Option<unsafe extern fn (context_id: *mut c_char, log_level: u8, trace_status: u8)>) -> DltReturnValue;
    pub fn dlt_verbose_mode() -> DltReturnValue;
    pub fn dlt_user_check_library_version(user_major_version: *const c_char, user_minor_version: *const c_char) -> DltReturnValue;
    pub fn dlt_nonverbose_mode() -> DltReturnValue;
    pub fn dlt_use_extended_header_for_non_verbose(use_extende_header_for_non_verbose: i8) -> DltReturnValue;
    pub fn dlt_with_session_id(with_session_id: i8) -> DltReturnValue;
    pub fn dlt_with_timestamp(with_timestamp: i8) -> DltReturnValue;
    pub fn dlt_with_ecu_id(with_ecu_id: i8) -> DltReturnValue;
    pub fn dlt_set_application_ll_ts_limit(loglevel: DltLogLevelType, tracestatus: DltTraceStatusType) -> DltReturnValue;
    // Environment-variable driven initial log-level handling.
    pub fn dlt_env_adjust_ll_from_env(ll_set: *const dlt_env_ll_set, apid: *const c_char, ctid: *const c_char, ll: c_int) -> c_int;
    pub fn dlt_env_extract_ll_set(env: *mut *mut c_char, ll_set: *mut dlt_env_ll_set) -> c_int;
    pub fn dlt_env_free_ll_set(ll_set: *mut dlt_env_ll_set);
    pub fn dlt_enable_local_print() -> DltReturnValue;
    pub fn dlt_disable_local_print() -> DltReturnValue;
    pub fn dlt_log_string(handle: *mut DltContext, loglevel: DltLogLevelType, text: *const c_char) -> DltReturnValue;
    pub fn dlt_log_string_int(handle: *mut DltContext, loglevel: DltLogLevelType, text: *const c_char, data: c_int) -> DltReturnValue;
    pub fn dlt_log_string_uint(handle: *mut DltContext, loglevel: DltLogLevelType, text: *const c_char, data: c_uint) -> DltReturnValue;
    pub fn dlt_log_int(handle: *mut DltContext, loglevel: DltLogLevelType, data: c_int) -> DltReturnValue;
    pub fn dlt_log_uint(handle: *mut DltContext, loglevel: DltLogLevelType, data: c_uint) -> DltReturnValue;
    pub fn dlt_log_raw(handle: *mut DltContext, loglevel: DltLogLevelType, data: *mut c_void, length: u16) -> DltReturnValue;
    pub fn dlt_log_marker() -> DltReturnValue;
    pub fn dlt_forward_msg(msgdata: *mut c_void, size: size_t) -> DltReturnValue;
    pub fn dlt_user_check_buffer(total_size: *mut c_int, used_size: *mut c_int) -> DltReturnValue;
    pub fn dlt_user_atexit_blow_out_user_buffer() -> c_int;
    pub fn dlt_user_log_resend_buffer() -> DltReturnValue;
    // #ifdef DLT_TEST_ENABLE
    //void dlt_user_test_corrupt_user_header(int enable);
    //void dlt_user_test_corrupt_message_size(int enable,int16_t size);
    // #endif
}
/// Checks whether `loglevel` is currently enabled for the given context.
///
/// Returns `DLT_RETURN_WRONG_PARAMETER` when `handle` or its embedded
/// log-level pointer is null, `DLT_RETURN_TRUE` when the requested level is
/// at or below the context's current level (and not `DLT_LOG_OFF`), and
/// `DLT_RETURN_LOGGING_DISABLED` otherwise.
#[inline]
pub unsafe fn dlt_user_is_logLevel_enabled(handle: *mut DltContext,
                                           loglevel: DltLogLevelType) -> DltReturnValue {
    // Both the context pointer and its log-level pointer must be non-null.
    let context = match handle.as_ref() {
        Some(context) => context,
        None => return DltReturnValue::DLT_RETURN_WRONG_PARAMETER,
    };
    let current_level = match context.log_level_ptr.as_ref() {
        Some(level) => level,
        None => return DltReturnValue::DLT_RETURN_WRONG_PARAMETER,
    };
    if loglevel != DltLogLevelType::DLT_LOG_OFF && loglevel as i8 <= *current_level {
        DltReturnValue::DLT_RETURN_TRUE
    } else {
        DltReturnValue::DLT_RETURN_LOGGING_DISABLED
    }
}
| 47.256757 | 224 | 0.732485 |
2f02da69a3c15b3bf33341194d6aad8644ca1d58
| 7,099 |
// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use crate::check_errors;
use crate::device::Device;
use crate::device::DeviceOwned;
use crate::OomError;
use crate::Success;
use crate::VulkanObject;
use std::mem::MaybeUninit;
use std::ptr;
use std::sync::Arc;
/// Used to block the GPU execution until an event on the CPU occurs.
///
/// Note that Vulkan implementations may have limits on how long a command buffer will wait for an
/// event to be signaled, in order to avoid interfering with progress of other clients of the GPU.
/// If the event isn't signaled within these limits, results are undefined and may include
/// device loss.
#[derive(Debug)]
pub struct Event {
    // The raw Vulkan event handle.
    event: ash::vk::Event,
    // The device that owns the event.
    device: Arc<Device>,
    // True when the event came from the device's event pool; on drop it is
    // returned to the pool instead of being destroyed.
    must_put_in_pool: bool,
}
impl Event {
    /// Takes an event from the vulkano-provided event pool.
    /// If the pool is empty, a new event will be allocated.
    /// Upon `drop`, the event is put back into the pool.
    ///
    /// For most applications, using the event pool should be preferred,
    /// in order to avoid creating new events every frame.
    pub fn from_pool(device: Arc<Device>) -> Result<Event, OomError> {
        let maybe_raw_event = device.event_pool().lock().unwrap().pop();
        match maybe_raw_event {
            Some(raw_event) => {
                unsafe {
                    // Make sure the event isn't signaled
                    // (a recycled event may have been left in the set state).
                    let fns = device.fns();
                    check_errors(fns.v1_0.reset_event(device.internal_object(), raw_event))?;
                }
                Ok(Event {
                    event: raw_event,
                    device: device,
                    must_put_in_pool: true,
                })
            }
            None => {
                // Pool is empty, alloc new event
                Event::alloc_impl(device, true)
            }
        }
    }
    /// Builds a new event.
    #[inline]
    pub fn alloc(device: Arc<Device>) -> Result<Event, OomError> {
        Event::alloc_impl(device, false)
    }
    // Shared allocation path; `must_put_in_pool` decides whether the event is
    // recycled into the device pool or destroyed on drop.
    fn alloc_impl(device: Arc<Device>, must_put_in_pool: bool) -> Result<Event, OomError> {
        let event = unsafe {
            let infos = ash::vk::EventCreateInfo {
                flags: ash::vk::EventCreateFlags::empty(),
                ..Default::default()
            };
            let mut output = MaybeUninit::uninit();
            let fns = device.fns();
            check_errors(fns.v1_0.create_event(
                device.internal_object(),
                &infos,
                ptr::null(),
                output.as_mut_ptr(),
            ))?;
            // `create_event` succeeded (the `?` above returned otherwise),
            // so `output` has been written and assume_init is sound.
            output.assume_init()
        };
        Ok(Event {
            device: device,
            event: event,
            must_put_in_pool: must_put_in_pool,
        })
    }
    /// Returns true if the event is signaled.
    #[inline]
    pub fn signaled(&self) -> Result<bool, OomError> {
        unsafe {
            let fns = self.device.fns();
            let result = check_errors(
                fns.v1_0
                    .get_event_status(self.device.internal_object(), self.event),
            )?;
            // vkGetEventStatus only reports set/reset on success.
            match result {
                Success::EventSet => Ok(true),
                Success::EventReset => Ok(false),
                _ => unreachable!(),
            }
        }
    }
    /// See the docs of set().
    #[inline]
    pub fn set_raw(&mut self) -> Result<(), OomError> {
        unsafe {
            let fns = self.device.fns();
            check_errors(
                fns.v1_0
                    .set_event(self.device.internal_object(), self.event),
            )?;
            Ok(())
        }
    }
    /// Changes the `Event` to the signaled state.
    ///
    /// If a command buffer is waiting on this event, it is then unblocked.
    ///
    /// # Panic
    ///
    /// - Panics if the device or host ran out of memory.
    ///
    #[inline]
    pub fn set(&mut self) {
        self.set_raw().unwrap();
    }
    /// See the docs of reset().
    #[inline]
    pub fn reset_raw(&mut self) -> Result<(), OomError> {
        unsafe {
            let fns = self.device.fns();
            check_errors(
                fns.v1_0
                    .reset_event(self.device.internal_object(), self.event),
            )?;
            Ok(())
        }
    }
    /// Changes the `Event` to the unsignaled state.
    ///
    /// # Panic
    ///
    /// - Panics if the device or host ran out of memory.
    ///
    #[inline]
    pub fn reset(&mut self) {
        self.reset_raw().unwrap();
    }
}
unsafe impl DeviceOwned for Event {
    #[inline]
    fn device(&self) -> &Arc<Device> {
        &self.device
    }
}
unsafe impl VulkanObject for Event {
    type Object = ash::vk::Event;
    #[inline]
    fn internal_object(&self) -> ash::vk::Event {
        self.event
    }
}
impl Drop for Event {
    #[inline]
    fn drop(&mut self) {
        unsafe {
            // Pooled events are recycled into the device's event pool;
            // only non-pooled events are actually destroyed.
            if self.must_put_in_pool {
                let raw_event = self.event;
                self.device.event_pool().lock().unwrap().push(raw_event);
            } else {
                let fns = self.device.fns();
                fns.v1_0
                    .destroy_event(self.device.internal_object(), self.event, ptr::null());
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use crate::sync::Event;
    use crate::VulkanObject;
    #[test]
    fn event_create() {
        let (device, _) = gfx_dev_and_queue!();
        let event = Event::alloc(device).unwrap();
        // Freshly allocated events start unsignaled.
        assert!(!event.signaled().unwrap());
    }
    #[test]
    fn event_set() {
        let (device, _) = gfx_dev_and_queue!();
        let mut event = Event::alloc(device).unwrap();
        assert!(!event.signaled().unwrap());
        event.set();
        assert!(event.signaled().unwrap());
    }
    #[test]
    fn event_reset() {
        let (device, _) = gfx_dev_and_queue!();
        let mut event = Event::alloc(device).unwrap();
        event.set();
        assert!(event.signaled().unwrap());
        event.reset();
        assert!(!event.signaled().unwrap());
    }
    #[test]
    fn event_pool() {
        let (device, _) = gfx_dev_and_queue!();
        assert_eq!(device.event_pool().lock().unwrap().len(), 0);
        let event1_internal_obj = {
            // Dropping a pooled event returns its raw handle to the pool.
            let event = Event::from_pool(device.clone()).unwrap();
            assert_eq!(device.event_pool().lock().unwrap().len(), 0);
            event.internal_object()
        };
        assert_eq!(device.event_pool().lock().unwrap().len(), 1);
        // The next pooled allocation must reuse that same raw handle.
        let event2 = Event::from_pool(device.clone()).unwrap();
        assert_eq!(device.event_pool().lock().unwrap().len(), 0);
        assert_eq!(event2.internal_object(), event1_internal_obj);
    }
}
| 28.857724 | 98 | 0.539372 |
2f19c5299c6e20cd266b2f7c157b4adb768ddc16
| 4,774 |
/*!
This example demonstrates how to manually create a glium context with any backend you want, most
notably without glutin.
There are three concepts in play:
- The `Backend` trait, which defines how glium interfaces with the OpenGL context
provider (glutin, SDL, glfw, etc.).
- The `Context` struct, which is the main object of glium. The context also provides
OpenGL-related functions like `get_free_video_memory` or `get_supported_glsl_version`.
- The `Facade` trait, which is the trait required to be implemented on objects that you pass
to functions like `VertexBuffer::new`. This trait is implemented on `Rc<Context>`, which
means that you can direct pass the context.
*/
use takeable_option::Takeable;
use glium::Surface;
use glium::glutin::{self, PossiblyCurrent};
use std::rc::Rc;
use std::cell::RefCell;
use std::os::raw::c_void;
fn main() {
    // building the glutin window
    // note that it's just `build` and not `build_glium`
    let event_loop = glutin::event_loop::EventLoop::new();
    let wb = glutin::window::WindowBuilder::new();
    let cb = glutin::ContextBuilder::new();
    let gl_window = cb.build_windowed(wb, &event_loop).unwrap();
    // NOTE(review): `treat_as_current` is unsafe because the caller must
    // guarantee the context really is current — presumably true right after
    // creation on this thread; confirm against glutin's docs.
    let gl_window = unsafe {
        gl_window.treat_as_current()
    };
    // Shared ownership (Rc<RefCell<Takeable<…>>>) lets both `main` and the
    // `Backend` below access the context; `Takeable` allows `make_current`
    // to move it out and back in.
    let gl_window = Rc::new(RefCell::new(Takeable::new(gl_window)));
    // in order to create our context, we will need to provide an object which implements
    // the `Backend` trait
    struct Backend {
        gl_window: Rc<RefCell<Takeable<glutin::WindowedContext<PossiblyCurrent>>>>,
    }
    unsafe impl glium::backend::Backend for Backend {
        fn swap_buffers(&self) -> Result<(), glium::SwapBuffersError> {
            // Only ContextLost is recoverable for glium; the other glutin
            // errors have no SwapBuffersError equivalent and abort.
            match self.gl_window.borrow().swap_buffers() {
                Ok(()) => Ok(()),
                Err(glutin::ContextError::FunctionUnavailable) => panic!(),
                Err(glutin::ContextError::IoError(_)) => panic!(),
                Err(glutin::ContextError::OsError(_)) => panic!(),
                Err(glutin::ContextError::ContextLost) => Err(glium::SwapBuffersError::ContextLost),
            }
        }
        // this function is called only after the OpenGL context has been made current
        unsafe fn get_proc_address(&self, symbol: &str) -> *const c_void {
            self.gl_window.borrow().get_proc_address(symbol) as *const _
        }
        // this function is used to adjust the viewport when the user wants to draw or blit on
        // the whole window
        fn get_framebuffer_dimensions(&self) -> (u32, u32) {
            // we default to a dummy value is the window no longer exists
            self.gl_window.borrow().window().inner_size().into() // conversion into u32 performs rounding
        }
        fn is_current(&self) -> bool {
            // if you are using a library that doesn't provide an equivalent to `is_current`, you
            // can just put `unimplemented!` and pass `false` when you create
            // the `Context` (see below)
            self.gl_window.borrow().is_current()
        }
        unsafe fn make_current(&self) {
            // glutin's `make_current` consumes the context, so temporarily
            // take it out of the Takeable and put the new one back.
            let mut gl_window_takeable = self.gl_window.borrow_mut();
            let gl_window = Takeable::take(&mut gl_window_takeable);
            let gl_window = gl_window.make_current().unwrap();
            Takeable::insert(&mut gl_window_takeable, gl_window);
        }
    }
    // now building the context
    let context = unsafe {
        // The first parameter is our backend.
        //
        // The second parameter tells glium whether or not it should regularly call `is_current`
        // on the backend to make sure that the OpenGL context is still the current one.
        //
        // It is recommended to pass `true`, but you can pass `false` if you are sure that no
        // other OpenGL context will be made current in this thread.
        let backend = Backend { gl_window: gl_window };
        glium::backend::Context::new(backend, true, Default::default())
    }.unwrap();
    // drawing a frame to prove that it works
    // note that constructing a `Frame` object manually is a bit hacky and may be changed
    // in the future
    let mut target = glium::Frame::new(context.clone(), context.get_framebuffer_dimensions());
    target.clear_color(0.0, 1.0, 0.0, 1.0);
    target.finish().unwrap();
    // the window is still available
    event_loop.run(|event, _, control_flow| {
        *control_flow = match event {
            glutin::event::Event::WindowEvent { event, .. } => match event {
                glutin::event::WindowEvent::CloseRequested => glutin::event_loop::ControlFlow::Exit,
                _ => glutin::event_loop::ControlFlow::Poll,
            },
            _ => glutin::event_loop::ControlFlow::Poll,
        }
    });
}
| 41.513043 | 105 | 0.640553 |
e9163dccd3091e66c090a024c85565fa8e2acf00
| 6,185 |
/// Trait that must be implemented by all states that wish to work with the state machine.
/// Needed to pass ggez::event::EventHandler functions to states managed by state manager.
pub trait CustomEventHandler {
    // required
    /// Used to pass parameters from ggez::event::EventHandler update to state
    fn update(&mut self, _ctx: &mut Context) -> HandlerMessage;
    /// Used to pass parameters from ggez::event::EventHandler draw to state
    fn draw(&mut self, _ctx: &mut Context) -> GameResult;
    /// Used to pass parameters from ggez::event::EventHandler key_down_event to state.
    /// Default implementation requests no transition.
    fn key_down_event(&mut self, _ctx: &mut Context, _keycode: KeyCode, _keymods: KeyMods, _repeat: bool) -> HandlerMessage { HandlerMessage::Keep }
    // add more EventHandler method wrappers as needed
}
/// Enum for states to return to state manager, telling the state manager what to do. (ie how to transition between states)
pub enum HandlerMessage {
    /// no change needed, stick with current CustomEventHandler
    Keep,
    /// leave current CustomEventHandler, going back to previous CustomEventHandler
    Bail,
    /// spawn new CustomEventHandler on-top of current CustomEventHandler
    Spawn(Box<dyn CustomEventHandler>),
    /// change current CustomEventHandler into a new CustomEventHandler (equivalent to Bail followed by Spawn)
    Change(Box<dyn CustomEventHandler>),
    /// an error occurred and needs to be reported
    Error(GameError),
}
impl HandlerMessage {
/// Helper function for translating what the messages mean and making the state machine do the correct transition.
fn handle(self, sm: &mut StateMachine) -> GameResult {
match self {
HandlerMessage::Keep => (),
HandlerMessage::Bail => {sm.pop();},
HandlerMessage::Spawn(new) => sm.push(new),
HandlerMessage::Change(new) => {sm.pop(); sm.push(new)},
HandlerMessage::Error(err) => return Err(err),
};
Ok(())
}
}
/// Struct used to manage various state the game may be in.
/// Passes required information to the current state.
pub struct StateMachine{
    // Stack of states; the top entry is the active one. The machine quits
    // the game when the stack becomes empty.
    states: Vec<Box<dyn CustomEventHandler>>,
}
impl StateMachine {
    /// Creates a state machine whose stack initially holds `state`.
    pub fn new(state: Box<dyn CustomEventHandler>) -> StateMachine {
        StateMachine { states: vec![state] }
    }
    /// Makes `state` the active state by pushing it onto the stack.
    pub fn push(&mut self, state: Box<dyn CustomEventHandler>) {
        self.states.push(state);
    }
    /// Removes and returns the active state, if any.
    pub fn pop(&mut self) -> Option<Box<dyn CustomEventHandler>> {
        self.states.pop()
    }
    /// Reports whether the stack is empty; when it is, also requests that
    /// the game quit.
    fn is_empty(&self, ctx: &mut Context) -> bool {
        let empty = self.states.is_empty();
        if empty {
            event::quit(ctx);
        }
        empty
    }
}
/// Implements EventHandler for State (ie state used by LevelBuilder)
/// https://docs.rs/ggez/0.5.1/ggez/event/trait.EventHandler.html
impl EventHandler for StateMachine {
    /// Forwards `update` to the active state and applies any requested
    /// transition. Quits the game instead when no state remains.
    fn update(&mut self, ctx: &mut Context) -> GameResult {
        if self.is_empty(ctx) {
            return Ok(());
        }
        // Stack verified non-empty above, so unwrap cannot fail.
        let message = self.states.last_mut().unwrap().update(ctx);
        message.handle(self)
    }
    /// Forwards `draw` to the active state; quits when no state remains.
    fn draw(&mut self, ctx: &mut Context) -> GameResult {
        if self.is_empty(ctx) {
            return Ok(());
        }
        self.states.last_mut().unwrap().draw(ctx)
    }
    /// Forwards key presses to the active state. Escape always quits the
    /// game, and transition errors are deliberately discarded.
    fn key_down_event(&mut self, ctx: &mut Context, keycode: KeyCode, keymods: KeyMods, repeat: bool) {
        if keycode == KeyCode::Escape || self.is_empty(ctx) {
            event::quit(ctx);
            return;
        }
        let message = self
            .states
            .last_mut()
            .unwrap()
            .key_down_event(ctx, keycode, keymods, repeat);
        // Result intentionally ignored, matching the original
        // `match … { _ => () }` behavior.
        let _ = message.handle(self);
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Helper: builds a real ggez Context plus a StateMachine seeded with
    // one MainMenuState. Requires the resource directories to exist.
    fn create_state_machine_and_context() -> (StateMachine, Context) {
        let (mut ctx, _event_loop) =
            ggez::ContextBuilder::new("macguffin_quest", "James M. & William O.")
                .add_resource_path(std::path::PathBuf::from("./resources/texture"))
                .add_resource_path(std::path::PathBuf::from("./resources/font"))
                .add_resource_path(std::path::PathBuf::from("./resources/level"))
                .build()
                .unwrap();
        let mm = MainMenuState::new(&mut ctx);
        (StateMachine::new(Box::new(mm)), ctx)
    }
    // push() should grow the stack by one.
    #[test]
    fn test_state_machine_push() {
        let (ref mut sm, ref mut ctx) = create_state_machine_and_context();
        assert_eq!(sm.states.len(), 1usize);
        let mm = MainMenuState::new(ctx);
        sm.push(Box::new(mm));
        assert_eq!(sm.states.len(), 2usize);
    }
    // pop() should return Some while states remain, then None.
    #[test]
    fn test_state_machine_pop() {
        let (ref mut sm, ref mut _ctx) = create_state_machine_and_context();
        assert_eq!(sm.states.len(), 1usize);
        let r = sm.pop();
        assert!(r.is_some());
        assert_eq!(sm.states.len(), 0usize);
        let r = sm.pop();
        assert!(r.is_none());
        assert_eq!(sm.states.len(), 0usize);
    }
    // is_empty() should be true once the last state is popped.
    #[test]
    fn test_state_machine_is_empty() {
        let (ref mut sm, ref mut ctx) = create_state_machine_and_context();
        assert_eq!(sm.states.len(), 1usize);
        let _r = sm.pop();
        let r = sm.is_empty(ctx);
        assert!(r);
    }
}
| 35.751445 | 148 | 0.621342 |
0e76f569d96385d73d3998883a261c7cb29ede8d
| 85,691 |
extern crate cargo;
extern crate cargotest;
extern crate hamcrest;
extern crate tempdir;
use std::env;
use std::fs::{self, File};
use std::io::prelude::*;
use cargo::util::process;
use cargotest::{is_nightly, rustc_host, sleep_ms};
use cargotest::support::paths::{CargoPathExt,root};
use cargotest::support::{ProjectBuilder};
use cargotest::support::{project, execs, main_file, basic_bin_manifest};
use cargotest::support::registry::Package;
use hamcrest::{assert_that, existing_file, is_not};
use tempdir::TempDir;
// Smoke test: a minimal binary crate builds and the produced executable
// runs and prints the expected output.
#[test]
fn cargo_compile_simple() {
    let p = project("foo")
        .file("Cargo.toml", &basic_bin_manifest("foo"))
        .file("src/foo.rs", &main_file(r#""i am foo""#, &[]));
    assert_that(p.cargo_process("build"), execs().with_status(0));
    assert_that(&p.bin("foo"), existing_file());
    assert_that(process(&p.bin("foo")),
                execs().with_status(0).with_stdout("i am foo\n"));
}
/// Check that the `CARGO_INCREMENTAL` environment variable results in
/// `rustc` getting `-Zincremental` passed to it.
/// Skipped on stable toolchains since `-Z` flags are nightly-only.
#[test]
fn cargo_compile_incremental() {
    if !is_nightly() {
        return
    }
    let p = project("foo")
        .file("Cargo.toml", &basic_bin_manifest("foo"))
        .file("src/foo.rs", &main_file(r#""i am foo""#, &[]));
    p.build();
    // Both `build` and `test` must forward the incremental flag.
    assert_that(
        p.cargo("build").arg("-v").env("CARGO_INCREMENTAL", "1"),
        execs().with_stderr_contains(
            "[RUNNING] `rustc [..] -Zincremental=[..][/]target[/]debug[/]incremental`\n")
            .with_status(0));
    assert_that(
        p.cargo("test").arg("-v").env("CARGO_INCREMENTAL", "1"),
        execs().with_stderr_contains(
            "[RUNNING] `rustc [..] -Zincremental=[..][/]target[/]debug[/]incremental`\n")
            .with_status(0));
}
// `--manifest-path` should let a build run from outside the project root.
#[test]
fn cargo_compile_manifest_path() {
    let p = project("foo")
        .file("Cargo.toml", &basic_bin_manifest("foo"))
        .file("src/foo.rs", &main_file(r#""i am foo""#, &[]));
    assert_that(p.cargo_process("build")
                 .arg("--manifest-path").arg("foo/Cargo.toml")
                 .cwd(p.root().parent().unwrap()),
                execs().with_status(0));
    assert_that(&p.bin("foo"), existing_file());
}
// An empty manifest must be rejected with a clear error.
#[test]
fn cargo_compile_with_invalid_manifest() {
    let p = project("foo")
        .file("Cargo.toml", "");
    assert_that(p.cargo_process("build"),
                execs()
                .with_status(101)
                .with_stderr("\
[ERROR] failed to parse manifest at `[..]`

Caused by:
  no `package` or `project` section found.
"))
}
// Invalid TOML syntax in the manifest surfaces the TOML parse error.
#[test]
fn cargo_compile_with_invalid_manifest2() {
    let p = project("foo")
        .file("Cargo.toml", r"
            [project]
            foo = bar
        ");
    assert_that(p.cargo_process("build"),
                execs()
                .with_status(101)
                .with_stderr("\
[ERROR] failed to parse manifest at `[..]`

Caused by:
  could not parse input as TOML

Caused by:
  invalid number at line 3
"))
}
// A broken Cargo.toml reached via --manifest-path also fails to parse.
#[test]
fn cargo_compile_with_invalid_manifest3() {
    let p = project("foo")
        .file("Cargo.toml", r#"
            [package]
            name = "foo"
            version = "0.0.1"
            authors = []
        "#)
        .file("src/Cargo.toml", "a = bar");
    assert_that(p.cargo_process("build").arg("--manifest-path")
                 .arg("src/Cargo.toml"),
                execs()
                .with_status(101)
                .with_stderr("\
[ERROR] failed to parse manifest at `[..]`

Caused by:
  could not parse input as TOML

Caused by:
  invalid number at line 1
"))
}
// The same source file appearing in two build targets only warns;
// the build still succeeds.
#[test]
fn cargo_compile_duplicate_build_targets() {
    let p = project("foo")
        .file("Cargo.toml", r#"
            [package]
            name = "foo"
            version = "0.0.1"
            authors = []

            [lib]
            name = "main"
            crate-type = ["dylib"]

            [dependencies]
        "#)
        .file("src/main.rs", r#"
            #![allow(warnings)]
            fn main() {}
        "#);
    assert_that(p.cargo_process("build"),
                execs()
                .with_status(0)
                .with_stderr("\
warning: file found to be present in multiple build targets: [..]main.rs
[COMPILING] foo v0.0.1 ([..])
[FINISHED] [..]
"));
}
// "1.0" is not a full semver triple and must be rejected.
#[test]
fn cargo_compile_with_invalid_version() {
    let p = project("foo")
        .file("Cargo.toml", r#"
            [project]
            name = "foo"
            authors = []
            version = "1.0"
        "#);
    assert_that(p.cargo_process("build"),
                execs()
                .with_status(101)
                .with_stderr("\
[ERROR] failed to parse manifest at `[..]`

Caused by:
  cannot parse '1.0' as a semver for key `project.version`
"))
}
// An empty package name is invalid.
#[test]
fn cargo_compile_with_invalid_package_name() {
    let p = project("foo")
        .file("Cargo.toml", r#"
            [package]
            name = ""
            authors = []
            version = "0.0.0"
        "#);
    assert_that(p.cargo_process("build"),
                execs()
                .with_status(101)
                .with_stderr("\
[ERROR] failed to parse manifest at `[..]`

Caused by:
  package name cannot be an empty string.
"))
}
// An empty [[bin]] target name is invalid.
#[test]
fn cargo_compile_with_invalid_bin_target_name() {
    let p = project("foo")
        .file("Cargo.toml", r#"
            [package]
            name = "foo"
            authors = []
            version = "0.0.0"

            [[bin]]
            name = ""
        "#);
    assert_that(p.cargo_process("build"),
                execs()
                .with_status(101)
                .with_stderr("\
[ERROR] failed to parse manifest at `[..]`

Caused by:
  binary target names cannot be empty.
"))
}
// "build" is a reserved binary target name.
#[test]
fn cargo_compile_with_forbidden_bin_target_name() {
    let p = project("foo")
        .file("Cargo.toml", r#"
            [package]
            name = "foo"
            authors = []
            version = "0.0.0"

            [[bin]]
            name = "build"
        "#);
    assert_that(p.cargo_process("build"),
                execs()
                .with_status(101)
                .with_stderr("\
[ERROR] failed to parse manifest at `[..]`

Caused by:
  the binary target name `build` is forbidden
"))
}
// An empty [lib] target name is invalid.
#[test]
fn cargo_compile_with_invalid_lib_target_name() {
    let p = project("foo")
        .file("Cargo.toml", r#"
            [package]
            name = "foo"
            authors = []
            version = "0.0.0"

            [lib]
            name = ""
        "#);
    assert_that(p.cargo_process("build"),
                execs()
                .with_status(101)
                .with_stderr("\
[ERROR] failed to parse manifest at `[..]`

Caused by:
  library target names cannot be empty.
"))
}
// Running cargo in a directory with no Cargo.toml anywhere up the tree
// produces a "could not find" error.
#[test]
fn cargo_compile_without_manifest() {
    let tmpdir = TempDir::new("cargo").unwrap();
    let p = ProjectBuilder::new("foo", tmpdir.path().to_path_buf());
    assert_that(p.cargo_process("build"),
                execs().with_status(101)
                       .with_stderr("\
[ERROR] could not find `Cargo.toml` in `[..]` or any parent directory
"));
}
// A rustc compile failure still leaves a Cargo.lock behind.
#[test]
fn cargo_compile_with_invalid_code() {
    let p = project("foo")
        .file("Cargo.toml", &basic_bin_manifest("foo"))
        .file("src/foo.rs", "invalid rust code!");
    assert_that(p.cargo_process("build"),
                execs()
                .with_status(101)
                .with_stderr_contains("\
[ERROR] Could not compile `foo`.

To learn more, run the command again with --verbose.\n"));
    assert_that(&p.root().join("Cargo.lock"), existing_file());
}
// Broken code in path dependencies fails the whole build.
#[test]
fn cargo_compile_with_invalid_code_in_deps() {
    let p = project("foo")
        .file("Cargo.toml", r#"
            [package]
            name = "foo"
            version = "0.0.1"
            authors = []

            [dependencies.bar]
            path = "../bar"
            [dependencies.baz]
            path = "../baz"
        "#)
        .file("src/main.rs", "invalid rust code!");
    let bar = project("bar")
        .file("Cargo.toml", &basic_bin_manifest("bar"))
        .file("src/lib.rs", "invalid rust code!");
    let baz = project("baz")
        .file("Cargo.toml", &basic_bin_manifest("baz"))
        .file("src/lib.rs", "invalid rust code!");
    bar.build();
    baz.build();
    assert_that(p.cargo_process("build"), execs().with_status(101));
}
// rustc warnings from the root package appear in cargo's output.
#[test]
fn cargo_compile_with_warnings_in_the_root_package() {
    let p = project("foo")
        .file("Cargo.toml", &basic_bin_manifest("foo"))
        .file("src/foo.rs", "fn main() {} fn dead() {}");
    assert_that(p.cargo_process("build"),
                execs().with_status(0).with_stderr_contains("\
[..]function is never used: `dead`[..]
"));
}
// rustc warnings from a path dependency also surface, and the final
// binary still builds and runs.
#[test]
fn cargo_compile_with_warnings_in_a_dep_package() {
    let mut p = project("foo");

    p = p
        .file("Cargo.toml", r#"
            [project]

            name = "foo"
            version = "0.5.0"
            authors = ["[email protected]"]

            [dependencies.bar]
            path = "bar"

            [[bin]]

            name = "foo"
        "#)
        .file("src/foo.rs",
              &main_file(r#""{}", bar::gimme()"#, &["bar"]))
        .file("bar/Cargo.toml", r#"
            [project]

            name = "bar"
            version = "0.5.0"
            authors = ["[email protected]"]

            [lib]

            name = "bar"
        "#)
        .file("bar/src/bar.rs", r#"
            pub fn gimme() -> &'static str {
                "test passed"
            }

            fn dead() {}
        "#);
    assert_that(p.cargo_process("build"),
                execs().with_status(0).with_stderr_contains("\
[..]function is never used: `dead`[..]
"));
    assert_that(&p.bin("foo"), existing_file());
    assert_that(
      process(&p.bin("foo")),
      execs().with_status(0).with_stdout("test passed\n"));
}
// Transitive path deps with inferred (src/lib.rs) targets resolve and link.
#[test]
fn cargo_compile_with_nested_deps_inferred() {
    let p = project("foo")
        .file("Cargo.toml", r#"
            [project]

            name = "foo"
            version = "0.5.0"
            authors = ["[email protected]"]

            [dependencies.bar]
            path = 'bar'

            [[bin]]
            name = "foo"
        "#)
        .file("src/foo.rs",
              &main_file(r#""{}", bar::gimme()"#, &["bar"]))
        .file("bar/Cargo.toml", r#"
            [project]

            name = "bar"
            version = "0.5.0"
            authors = ["[email protected]"]

            [dependencies.baz]
            path = "../baz"
        "#)
        .file("bar/src/lib.rs", r#"
            extern crate baz;

            pub fn gimme() -> String {
                baz::gimme()
            }
        "#)
        .file("baz/Cargo.toml", r#"
            [project]

            name = "baz"
            version = "0.5.0"
            authors = ["[email protected]"]
        "#)
        .file("baz/src/lib.rs", r#"
            pub fn gimme() -> String {
                "test passed".to_string()
            }
        "#);

    p.cargo_process("build")
        .exec_with_output()
        .unwrap();

    assert_that(&p.bin("foo"), existing_file());
    // rlibs live in deps/, not next to the binary.
    assert_that(&p.bin("libbar.rlib"), is_not(existing_file()));
    assert_that(&p.bin("libbaz.rlib"), is_not(existing_file()));

    assert_that(
      process(&p.bin("foo")),
      execs().with_status(0).with_stdout("test passed\n"));
}
// Same as above but with src/main.rs as the binary root.
#[test]
fn cargo_compile_with_nested_deps_correct_bin() {
    let p = project("foo")
        .file("Cargo.toml", r#"
            [project]

            name = "foo"
            version = "0.5.0"
            authors = ["[email protected]"]

            [dependencies.bar]
            path = "bar"

            [[bin]]
            name = "foo"
        "#)
        .file("src/main.rs",
              &main_file(r#""{}", bar::gimme()"#, &["bar"]))
        .file("bar/Cargo.toml", r#"
            [project]

            name = "bar"
            version = "0.5.0"
            authors = ["[email protected]"]

            [dependencies.baz]
            path = "../baz"
        "#)
        .file("bar/src/lib.rs", r#"
            extern crate baz;

            pub fn gimme() -> String {
                baz::gimme()
            }
        "#)
        .file("baz/Cargo.toml", r#"
            [project]

            name = "baz"
            version = "0.5.0"
            authors = ["[email protected]"]
        "#)
        .file("baz/src/lib.rs", r#"
            pub fn gimme() -> String {
                "test passed".to_string()
            }
        "#);

    p.cargo_process("build")
        .exec_with_output()
        .unwrap();

    assert_that(&p.bin("foo"), existing_file());
    assert_that(&p.bin("libbar.rlib"), is_not(existing_file()));
    assert_that(&p.bin("libbaz.rlib"), is_not(existing_file()));

    assert_that(
      process(&p.bin("foo")),
      execs().with_status(0).with_stdout("test passed\n"));
}
// Same again with explicit [lib] sections naming non-default lib files.
#[test]
fn cargo_compile_with_nested_deps_shorthand() {
    let p = project("foo")
        .file("Cargo.toml", r#"
            [project]

            name = "foo"
            version = "0.5.0"
            authors = ["[email protected]"]

            [dependencies.bar]
            path = "bar"

            [[bin]]
            name = "foo"
        "#)
        .file("src/foo.rs",
              &main_file(r#""{}", bar::gimme()"#, &["bar"]))
        .file("bar/Cargo.toml", r#"
            [project]

            name = "bar"
            version = "0.5.0"
            authors = ["[email protected]"]

            [dependencies.baz]
            path = "../baz"

            [lib]

            name = "bar"
        "#)
        .file("bar/src/bar.rs", r#"
            extern crate baz;

            pub fn gimme() -> String {
                baz::gimme()
            }
        "#)
        .file("baz/Cargo.toml", r#"
            [project]

            name = "baz"
            version = "0.5.0"
            authors = ["[email protected]"]

            [lib]

            name = "baz"
        "#)
        .file("baz/src/baz.rs", r#"
            pub fn gimme() -> String {
                "test passed".to_string()
            }
        "#);

    p.cargo_process("build")
        .exec_with_output()
        .unwrap();

    assert_that(&p.bin("foo"), existing_file());
    assert_that(&p.bin("libbar.rlib"), is_not(existing_file()));
    assert_that(&p.bin("libbaz.rlib"), is_not(existing_file()));

    assert_that(
      process(&p.bin("foo")),
      execs().with_status(0).with_stdout("test passed\n"));
}
// Longhand form: path deps with explicit version requirements.
#[test]
fn cargo_compile_with_nested_deps_longhand() {
    let p = project("foo")
        .file("Cargo.toml", r#"
            [project]

            name = "foo"
            version = "0.5.0"
            authors = ["[email protected]"]

            [dependencies.bar]
            path = "bar"
            version = "0.5.0"

            [[bin]]

            name = "foo"
        "#)
        .file("src/foo.rs",
              &main_file(r#""{}", bar::gimme()"#, &["bar"]))
        .file("bar/Cargo.toml", r#"
            [project]

            name = "bar"
            version = "0.5.0"
            authors = ["[email protected]"]

            [dependencies.baz]
            path = "../baz"
            version = "0.5.0"

            [lib]

            name = "bar"
        "#)
        .file("bar/src/bar.rs", r#"
            extern crate baz;

            pub fn gimme() -> String {
                baz::gimme()
            }
        "#)
        .file("baz/Cargo.toml", r#"
            [project]

            name = "baz"
            version = "0.5.0"
            authors = ["[email protected]"]

            [lib]

            name = "baz"
        "#)
        .file("baz/src/baz.rs", r#"
            pub fn gimme() -> String {
                "test passed".to_string()
            }
        "#);

    assert_that(p.cargo_process("build"), execs());

    assert_that(&p.bin("foo"), existing_file());
    assert_that(&p.bin("libbar.rlib"), is_not(existing_file()));
    assert_that(&p.bin("libbaz.rlib"), is_not(existing_file()));

    assert_that(process(&p.bin("foo")),
                execs().with_status(0).with_stdout("test passed\n"));
}
// Check that Cargo gives a sensible error if a dependency can't be found
// because of a name mismatch.
#[test]
fn cargo_compile_with_dep_name_mismatch() {
    let p = project("foo")
        .file("Cargo.toml", r#"
            [package]

            name = "foo"
            version = "0.0.1"
            authors = ["[email protected]"]

            [[bin]]

            name = "foo"

            [dependencies.notquitebar]

            path = "bar"
        "#)
        .file("src/foo.rs", &main_file(r#""i am foo""#, &["bar"]))
        .file("bar/Cargo.toml", &basic_bin_manifest("bar"))
        .file("bar/src/bar.rs", &main_file(r#""i am bar""#, &[]));

    assert_that(p.cargo_process("build"),
                execs().with_status(101).with_stderr(&format!(
r#"[ERROR] no matching package named `notquitebar` found (required by `foo`)
location searched: {proj_dir}/bar
version required: *
"#, proj_dir = p.url())));
}
// Passing a file name (rather than a target name) to --bin/--example
// errors out, with a "Did you mean" suggestion when close.
#[test]
fn cargo_compile_with_filename() {
    let p = project("foo")
        .file("Cargo.toml", r#"
            [project]
            name = "foo"
            version = "0.0.1"
            authors = []
        "#)
        .file("src/lib.rs", "")
        .file("src/bin/a.rs", r#"
            extern crate foo;
            fn main() { println!("hello a.rs"); }
        "#)
        .file("examples/a.rs", r#"
            fn main() { println!("example"); }
        "#);
    p.build();

    assert_that(p.cargo("build").arg("--bin").arg("bin.rs"),
                execs().with_status(101).with_stderr("\
[ERROR] no bin target named `bin.rs`"));

    assert_that(p.cargo("build").arg("--bin").arg("a.rs"),
                execs().with_status(101).with_stderr("\
[ERROR] no bin target named `a.rs`

Did you mean `a`?"));

    assert_that(p.cargo("build").arg("--example").arg("example.rs"),
                execs().with_status(101).with_stderr("\
[ERROR] no example target named `example.rs`"));

    assert_that(p.cargo("build").arg("--example").arg("a.rs"),
                execs().with_status(101).with_stderr("\
[ERROR] no example target named `a.rs`

Did you mean `a`?"));
}
// Bumping a path dependency's version after a lockfile exists should
// produce a "no matching version" error mentioning `cargo update`.
#[test]
fn compile_path_dep_then_change_version() {
    let p = project("foo")
        .file("Cargo.toml", r#"
            [package]
            name = "foo"
            version = "0.0.1"
            authors = []

            [dependencies.bar]
            path = "bar"
        "#)
        .file("src/lib.rs", "")
        .file("bar/Cargo.toml", r#"
            [package]
            name = "bar"
            version = "0.0.1"
            authors = []
        "#)
        .file("bar/src/lib.rs", "");

    assert_that(p.cargo_process("build"), execs().with_status(0));

    File::create(&p.root().join("bar/Cargo.toml")).unwrap().write_all(br#"
        [package]
        name = "bar"
        version = "0.0.2"
        authors = []
    "#).unwrap();

    assert_that(p.cargo("build"),
                execs().with_status(101).with_stderr("\
[ERROR] no matching version `= 0.0.1` found for package `bar` (required by `foo`)
location searched: [..]
versions found: 0.0.2
consider running `cargo update` to update a path dependency's locked version
"));
}
// A lockfile rewritten with CRLF line endings must still be accepted.
#[test]
fn ignores_carriage_return_in_lockfile() {
    let p = project("foo")
        .file("Cargo.toml", r#"
            [package]
            name = "foo"
            authors = []
            version = "0.0.1"
        "#)
        .file("src/main.rs", r#"
            mod a; fn main() {}
        "#)
        .file("src/a.rs", "");

    assert_that(p.cargo_process("build"),
                execs().with_status(0));

    let lockfile = p.root().join("Cargo.lock");
    let mut lock = String::new();
    File::open(&lockfile).unwrap().read_to_string(&mut lock).unwrap();
    let lock = lock.replace("\n", "\r\n");
    File::create(&lockfile).unwrap().write_all(lock.as_bytes()).unwrap();
    assert_that(p.cargo("build"),
                execs().with_status(0));
}
#[test]
fn cargo_default_env_metadata_env_var() {
    // Ensure that path dep + dylib + env_var get metadata
    // (even though path_dep + dylib should not)
    let p = project("foo")
        .file("Cargo.toml", r#"
            [package]
            name = "foo"
            version = "0.0.1"
            authors = []

            [dependencies.bar]
            path = "bar"
        "#)
        .file("src/lib.rs", "// hi")
        .file("bar/Cargo.toml", r#"
            [package]
            name = "bar"
            version = "0.0.1"
            authors = []

            [lib]
            name = "bar"
            crate_type = ["dylib"]
        "#)
        .file("bar/src/lib.rs", "// hello");
    p.build();

    // No metadata on libbar since it's a dylib path dependency
    // (note: no `-C extra-filename` in the bar invocation below).
    assert_that(p.cargo("build").arg("-v"),
                execs().with_status(0).with_stderr(&format!("\
[COMPILING] bar v0.0.1 ({url}/bar)
[RUNNING] `rustc --crate-name bar bar[/]src[/]lib.rs --crate-type dylib \
        --emit=dep-info,link \
        -C prefer-dynamic -C debuginfo=2 \
        -C metadata=[..] \
        --out-dir [..] \
        -L dependency={dir}[/]target[/]debug[/]deps`
[COMPILING] foo v0.0.1 ({url})
[RUNNING] `rustc --crate-name foo src[/]lib.rs --crate-type lib \
        --emit=dep-info,link -C debuginfo=2 \
        -C metadata=[..] \
        -C extra-filename=[..] \
        --out-dir [..] \
        -L dependency={dir}[/]target[/]debug[/]deps \
        --extern bar={dir}[/]target[/]debug[/]deps[/]{prefix}bar{suffix}`
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]",
dir = p.root().display(),
url = p.url(),
prefix = env::consts::DLL_PREFIX,
suffix = env::consts::DLL_SUFFIX,
)));

    assert_that(p.cargo("clean"), execs().with_status(0));

    // If you set the env-var, then we expect metadata on libbar
    // (the extern path gains a `bar-[..]` hash suffix).
    assert_that(p.cargo("build").arg("-v").env("__CARGO_DEFAULT_LIB_METADATA", "1"),
                execs().with_status(0).with_stderr(&format!("\
[COMPILING] bar v0.0.1 ({url}/bar)
[RUNNING] `rustc --crate-name bar bar[/]src[/]lib.rs --crate-type dylib \
        --emit=dep-info,link \
        -C prefer-dynamic -C debuginfo=2 \
        -C metadata=[..] \
        --out-dir [..] \
        -L dependency={dir}[/]target[/]debug[/]deps`
[COMPILING] foo v0.0.1 ({url})
[RUNNING] `rustc --crate-name foo src[/]lib.rs --crate-type lib \
        --emit=dep-info,link -C debuginfo=2 \
        -C metadata=[..] \
        -C extra-filename=[..] \
        --out-dir [..] \
        -L dependency={dir}[/]target[/]debug[/]deps \
        --extern bar={dir}[/]target[/]debug[/]deps[/]{prefix}bar-[..]{suffix}`
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]
",
dir = p.root().display(),
url = p.url(),
prefix = env::consts::DLL_PREFIX,
suffix = env::consts::DLL_SUFFIX,
)));
}
// CARGO_PKG_* environment variables must be visible to the compiled crate
// (both the bin and lib targets) via env!().
#[test]
fn crate_env_vars() {
    let p = project("foo")
        .file("Cargo.toml", r#"
            [project]
            name = "foo"
            version = "0.5.1-alpha.1"
            description = "This is foo"
            homepage = "http://example.com"
            authors = ["[email protected]"]
        "#)
        .file("src/main.rs", r#"
            extern crate foo;

            static VERSION_MAJOR: &'static str = env!("CARGO_PKG_VERSION_MAJOR");
            static VERSION_MINOR: &'static str = env!("CARGO_PKG_VERSION_MINOR");
            static VERSION_PATCH: &'static str = env!("CARGO_PKG_VERSION_PATCH");
            static VERSION_PRE: &'static str = env!("CARGO_PKG_VERSION_PRE");
            static VERSION: &'static str = env!("CARGO_PKG_VERSION");
            static CARGO_MANIFEST_DIR: &'static str = env!("CARGO_MANIFEST_DIR");
            static PKG_NAME: &'static str = env!("CARGO_PKG_NAME");
            static HOMEPAGE: &'static str = env!("CARGO_PKG_HOMEPAGE");
            static DESCRIPTION: &'static str = env!("CARGO_PKG_DESCRIPTION");

            fn main() {
                let s = format!("{}-{}-{} @ {} in {}", VERSION_MAJOR,
                                VERSION_MINOR, VERSION_PATCH, VERSION_PRE,
                                CARGO_MANIFEST_DIR);
                 assert_eq!(s, foo::version());
                 println!("{}", s);
                 assert_eq!("foo", PKG_NAME);
                 assert_eq!("http://example.com", HOMEPAGE);
                 assert_eq!("This is foo", DESCRIPTION);
                let s = format!("{}.{}.{}-{}", VERSION_MAJOR,
                                VERSION_MINOR, VERSION_PATCH, VERSION_PRE);
                assert_eq!(s, VERSION);
            }
        "#)
        .file("src/lib.rs", r#"
            pub fn version() -> String {
                format!("{}-{}-{} @ {} in {}",
                        env!("CARGO_PKG_VERSION_MAJOR"),
                        env!("CARGO_PKG_VERSION_MINOR"),
                        env!("CARGO_PKG_VERSION_PATCH"),
                        env!("CARGO_PKG_VERSION_PRE"),
                        env!("CARGO_MANIFEST_DIR"))
            }
        "#);

    println!("build");
    assert_that(p.cargo_process("build").arg("-v"), execs().with_status(0));

    println!("bin");
    assert_that(process(&p.bin("foo")),
                execs().with_status(0).with_stdout(&format!("0-5-1 @ alpha.1 in {}\n",
                                                           p.root().display())));

    println!("test");
    assert_that(p.cargo("test").arg("-v"),
                execs().with_status(0));
}
// CARGO_PKG_AUTHORS joins the authors list with ':'.
#[test]
fn crate_authors_env_vars() {
    let p = project("foo")
        .file("Cargo.toml", r#"
            [project]
            name = "foo"
            version = "0.5.1-alpha.1"
            authors = ["[email protected]", "[email protected]"]
        "#)
        .file("src/main.rs", r#"
            extern crate foo;

            static AUTHORS: &'static str = env!("CARGO_PKG_AUTHORS");

            fn main() {
                let s = "[email protected]:[email protected]";
                assert_eq!(AUTHORS, foo::authors());
                println!("{}", AUTHORS);
                assert_eq!(s, AUTHORS);
            }
        "#)
        .file("src/lib.rs", r#"
            pub fn authors() -> String {
                format!("{}", env!("CARGO_PKG_AUTHORS"))
            }
        "#);

    println!("build");
    assert_that(p.cargo_process("build").arg("-v"), execs().with_status(0));

    println!("bin");
    assert_that(process(&p.bin("foo")),
                execs().with_status(0).with_stdout("[email protected]:[email protected]"));

    println!("test");
    assert_that(p.cargo("test").arg("-v"),
                execs().with_status(0));
}
// this is testing that src/<pkg-name>.rs still works (for now)
// A lib with crate_type = ["rlib", "dylib"] must produce both artifacts.
#[test]
fn many_crate_types_old_style_lib_location() {
    let mut p = project("foo");
    p = p
        .file("Cargo.toml", r#"
            [project]

            name = "foo"
            version = "0.5.0"
            authors = ["[email protected]"]

            [lib]

            name = "foo"
            crate_type = ["rlib", "dylib"]
        "#)
        .file("src/foo.rs", r#"
            pub fn foo() {}
        "#);
    assert_that(p.cargo_process("build"), execs().with_status(0));

    assert_that(&p.root().join("target/debug/libfoo.rlib"), existing_file());
    let fname = format!("{}foo{}", env::consts::DLL_PREFIX,
                        env::consts::DLL_SUFFIX);
    assert_that(&p.root().join("target/debug").join(&fname), existing_file());
}
// Same check with the conventional src/lib.rs location.
#[test]
fn many_crate_types_correct() {
    let mut p = project("foo");
    p = p
        .file("Cargo.toml", r#"
            [project]

            name = "foo"
            version = "0.5.0"
            authors = ["[email protected]"]

            [lib]

            name = "foo"
            crate_type = ["rlib", "dylib"]
        "#)
        .file("src/lib.rs", r#"
            pub fn foo() {}
        "#);
    assert_that(p.cargo_process("build"),
                execs().with_status(0));

    assert_that(&p.root().join("target/debug/libfoo.rlib"), existing_file());
    let fname = format!("{}foo{}", env::consts::DLL_PREFIX,
                        env::consts::DLL_SUFFIX);
    assert_that(&p.root().join("target/debug").join(&fname), existing_file());
}
// Unknown manifest keys (here the typo `bulid` and a stray `lib.build`)
// only warn; the build still succeeds.
#[test]
fn unused_keys() {
    let mut p = project("foo");
    p = p
        .file("Cargo.toml", r#"
            [project]

            name = "foo"
            version = "0.5.0"
            authors = ["[email protected]"]
            bulid = "foo"

            [lib]

            name = "foo"
        "#)
        .file("src/foo.rs", r#"
            pub fn foo() {}
        "#);
    assert_that(p.cargo_process("build"),
                execs().with_status(0)
                       .with_stderr("\
warning: unused manifest key: project.bulid
[COMPILING] foo [..]
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]
"));

    let mut p = project("bar");
    p = p
        .file("Cargo.toml", r#"
            [project]

            name = "foo"
            version = "0.5.0"
            authors = ["[email protected]"]

            [lib]

            name = "foo"
            build = "foo"
        "#)
        .file("src/foo.rs", r#"
            pub fn foo() {}
        "#);
    assert_that(p.cargo_process("build"),
                execs().with_status(0)
                       .with_stderr("\
warning: unused manifest key: lib.build
[COMPILING] foo [..]
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]
"));
}
// A package depending on itself is a cyclic-dependency error.
#[test]
fn self_dependency() {
    let mut p = project("foo");
    p = p
        .file("Cargo.toml", r#"
            [package]

            name = "test"
            version = "0.0.0"
            authors = []

            [dependencies.test]

            path = "."

            [lib]
            name = "test"
        "#)
        .file("src/test.rs", "fn main() {}");
    assert_that(p.cargo_process("build"),
                execs().with_status(101)
                       .with_stderr("\
[ERROR] cyclic package dependency: package `test v0.0.0 ([..])` depends on itself
"));
}
// A dangling symlink in the project tree must not break the build.
#[test]
fn ignore_broken_symlinks() {
    // windows and symlinks don't currently agree that well
    if cfg!(windows) { return }

    let p = project("foo")
        .file("Cargo.toml", &basic_bin_manifest("foo"))
        .file("src/foo.rs", &main_file(r#""i am foo""#, &[]))
        .symlink("Notafile", "bar");

    assert_that(p.cargo_process("build"), execs().with_status(0));
    assert_that(&p.bin("foo"), existing_file());

    assert_that(process(&p.bin("foo")),
                execs().with_status(0).with_stdout("i am foo\n"));
}
// A manifest with no lib/bin targets at all is an error.
#[test]
fn missing_lib_and_bin() {
    let mut p = project("foo");
    p = p
        .file("Cargo.toml", r#"
            [package]

            name = "test"
            version = "0.0.0"
            authors = []
        "#);
    assert_that(p.cargo_process("build"),
                execs().with_status(101)
                       .with_stderr("\
[ERROR] failed to parse manifest at `[..]Cargo.toml`

Caused by:
  no targets specified in the manifest
  either src/lib.rs, src/main.rs, a [lib] section, or [[bin]] section must be present\n"));
}
// `[profile.release] lto = true` must add `-C lto` to the rustc invocation.
#[test]
fn lto_build() {
    // FIXME: currently this hits a linker bug on 32-bit MSVC
    if cfg!(all(target_env = "msvc", target_pointer_width = "32")) {
        return
    }

    let mut p = project("foo");
    p = p
        .file("Cargo.toml", r#"
            [package]

            name = "test"
            version = "0.0.0"
            authors = []

            [profile.release]
            lto = true
        "#)
        .file("src/main.rs", "fn main() {}");
    assert_that(p.cargo_process("build").arg("-v").arg("--release"),
                execs().with_status(0).with_stderr(&format!("\
[COMPILING] test v0.0.0 ({url})
[RUNNING] `rustc --crate-name test src[/]main.rs --crate-type bin \
        --emit=dep-info,link \
        -C opt-level=3 \
        -C lto \
        -C metadata=[..] \
        --out-dir {dir}[/]target[/]release[/]deps \
        -L dependency={dir}[/]target[/]release[/]deps`
[FINISHED] release [optimized] target(s) in [..]
",
dir = p.root().display(),
url = p.url(),
)));
}
// `-v` prints the full rustc command line for a debug build.
#[test]
fn verbose_build() {
    let mut p = project("foo");
    p = p
        .file("Cargo.toml", r#"
            [package]

            name = "test"
            version = "0.0.0"
            authors = []
        "#)
        .file("src/lib.rs", "");
    assert_that(p.cargo_process("build").arg("-v"),
                execs().with_status(0).with_stderr(&format!("\
[COMPILING] test v0.0.0 ({url})
[RUNNING] `rustc --crate-name test src[/]lib.rs --crate-type lib \
        --emit=dep-info,link -C debuginfo=2 \
        -C metadata=[..] \
        --out-dir [..] \
        -L dependency={dir}[/]target[/]debug[/]deps`
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]
",
dir = p.root().display(),
url = p.url(),
)));
}
// `-v --release` shows opt-level=3 and the release deps directory.
#[test]
fn verbose_release_build() {
    let mut p = project("foo");
    p = p
        .file("Cargo.toml", r#"
            [package]

            name = "test"
            version = "0.0.0"
            authors = []
        "#)
        .file("src/lib.rs", "");
    assert_that(p.cargo_process("build").arg("-v").arg("--release"),
                execs().with_status(0).with_stderr(&format!("\
[COMPILING] test v0.0.0 ({url})
[RUNNING] `rustc --crate-name test src[/]lib.rs --crate-type lib \
        --emit=dep-info,link \
        -C opt-level=3 \
        -C metadata=[..] \
        --out-dir [..] \
        -L dependency={dir}[/]target[/]release[/]deps`
[FINISHED] release [optimized] target(s) in [..]
",
dir = p.root().display(),
url = p.url(),
)));
}
// A dylib+rlib dependency is passed to rustc with two --extern flags,
// one per artifact.
#[test]
fn verbose_release_build_deps() {
    let mut p = project("foo");
    p = p
        .file("Cargo.toml", r#"
            [package]

            name = "test"
            version = "0.0.0"
            authors = []

            [dependencies.foo]
            path = "foo"
        "#)
        .file("src/lib.rs", "")
        .file("foo/Cargo.toml", r#"
            [package]

            name = "foo"
            version = "0.0.0"
            authors = []

            [lib]
            name = "foo"
            crate_type = ["dylib", "rlib"]
        "#)
        .file("foo/src/lib.rs", "");
    assert_that(p.cargo_process("build").arg("-v").arg("--release"),
                execs().with_status(0).with_stderr(&format!("\
[COMPILING] foo v0.0.0 ({url}/foo)
[RUNNING] `rustc --crate-name foo foo[/]src[/]lib.rs \
        --crate-type dylib --crate-type rlib \
        --emit=dep-info,link \
        -C prefer-dynamic \
        -C opt-level=3 \
        -C metadata=[..] \
        --out-dir [..] \
        -L dependency={dir}[/]target[/]release[/]deps`
[COMPILING] test v0.0.0 ({url})
[RUNNING] `rustc --crate-name test src[/]lib.rs --crate-type lib \
        --emit=dep-info,link \
        -C opt-level=3 \
        -C metadata=[..] \
        --out-dir [..] \
        -L dependency={dir}[/]target[/]release[/]deps \
        --extern foo={dir}[/]target[/]release[/]deps[/]{prefix}foo{suffix} \
        --extern foo={dir}[/]target[/]release[/]deps[/]libfoo.rlib`
[FINISHED] release [optimized] target(s) in [..]
",
                    dir = p.root().display(),
                    url = p.url(),
                    prefix = env::consts::DLL_PREFIX,
                    suffix = env::consts::DLL_SUFFIX)));
}
// [[example]] sections with explicit paths produce named example binaries.
#[test]
fn explicit_examples() {
    let mut p = project("world");
    p = p.file("Cargo.toml", r#"
            [package]
            name = "world"
            version = "1.0.0"
            authors = []

            [lib]
            name = "world"
            path = "src/lib.rs"

            [[example]]
            name = "hello"
            path = "examples/ex-hello.rs"

            [[example]]
            name = "goodbye"
            path = "examples/ex-goodbye.rs"
        "#)
        .file("src/lib.rs", r#"
            pub fn get_hello() -> &'static str { "Hello" }
            pub fn get_goodbye() -> &'static str { "Goodbye" }
            pub fn get_world() -> &'static str { "World" }
        "#)
        .file("examples/ex-hello.rs", r#"
            extern crate world;
            fn main() { println!("{}, {}!", world::get_hello(), world::get_world()); }
        "#)
        .file("examples/ex-goodbye.rs", r#"
            extern crate world;
            fn main() { println!("{}, {}!", world::get_goodbye(), world::get_world()); }
        "#);

    assert_that(p.cargo_process("test").arg("-v"), execs().with_status(0));
    assert_that(process(&p.bin("examples/hello")),
                        execs().with_status(0).with_stdout("Hello, World!\n"));
    assert_that(process(&p.bin("examples/goodbye")),
                        execs().with_status(0).with_stdout("Goodbye, World!\n"));
}
// Files under examples/ are picked up as examples with no manifest entry.
#[test]
fn implicit_examples() {
    let mut p = project("world");
    p = p.file("Cargo.toml", r#"
            [package]
            name = "world"
            version = "1.0.0"
            authors = []
        "#)
        .file("src/lib.rs", r#"
            pub fn get_hello() -> &'static str { "Hello" }
            pub fn get_goodbye() -> &'static str { "Goodbye" }
            pub fn get_world() -> &'static str { "World" }
        "#)
        .file("examples/hello.rs", r#"
            extern crate world;
            fn main() {
                println!("{}, {}!", world::get_hello(), world::get_world());
            }
        "#)
        .file("examples/goodbye.rs", r#"
            extern crate world;
            fn main() {
                println!("{}, {}!", world::get_goodbye(), world::get_world());
            }
        "#);

    assert_that(p.cargo_process("test"), execs().with_status(0));
    assert_that(process(&p.bin("examples/hello")),
                execs().with_status(0).with_stdout("Hello, World!\n"));
    assert_that(process(&p.bin("examples/goodbye")),
                execs().with_status(0).with_stdout("Goodbye, World!\n"));
}
// Debug builds keep debug_assertions enabled.
#[test]
fn standard_build_no_ndebug() {
    let p = project("world")
        .file("Cargo.toml", &basic_bin_manifest("foo"))
        .file("src/foo.rs", r#"
            fn main() {
                if cfg!(debug_assertions) {
                    println!("slow")
                } else {
                    println!("fast")
                }
            }
        "#);

    assert_that(p.cargo_process("build"), execs().with_status(0));
    assert_that(process(&p.bin("foo")),
                execs().with_status(0).with_stdout("slow\n"));
}
// Release builds disable debug_assertions.
#[test]
fn release_build_ndebug() {
    let p = project("world")
        .file("Cargo.toml", &basic_bin_manifest("foo"))
        .file("src/foo.rs", r#"
            fn main() {
                if cfg!(debug_assertions) {
                    println!("slow")
                } else {
                    println!("fast")
                }
            }
        "#);

    assert_that(p.cargo_process("build").arg("--release"),
                execs().with_status(0));
    assert_that(process(&p.release_bin("foo")),
                execs().with_status(0).with_stdout("fast\n"));
}
// src/main.rs is inferred as a binary named after the package.
#[test]
fn inferred_main_bin() {
    let p = project("world")
        .file("Cargo.toml", r#"
            [project]
            name = "foo"
            version = "0.0.1"
            authors = []
        "#)
        .file("src/main.rs", r#"
            fn main() {}
        "#);

    assert_that(p.cargo_process("build"), execs().with_status(0));
    assert_that(process(&p.bin("foo")), execs().with_status(0));
}
// Removing the `bar` dependency from Cargo.toml while src/main.rs still says
// `extern crate bar` must make the next build fail (exit status 101).
#[test]
fn deletion_causes_failure() {
let p = project("foo")
.file("Cargo.toml", r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
[dependencies.bar]
path = "bar"
"#)
.file("src/main.rs", r#"
extern crate bar;
fn main() {}
"#)
.file("bar/Cargo.toml", r#"
[package]
name = "bar"
version = "0.0.1"
authors = []
"#)
.file("bar/src/lib.rs", "");
p.build();
assert_that(p.cargo("build"), execs().with_status(0));
// Rewrite the manifest without the dependency, then rebuild.
p.change_file("Cargo.toml", r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
"#);
assert_that(p.cargo("build"), execs().with_status(101));
}
// A broken Cargo.toml placed inside target/ must be ignored — build artifacts
// are never treated as package sources.
#[test]
fn bad_cargo_toml_in_target_dir() {
let p = project("world")
.file("Cargo.toml", r#"
[project]
name = "foo"
version = "0.0.1"
authors = []
"#)
.file("src/main.rs", r#"
fn main() {}
"#)
.file("target/Cargo.toml", "bad-toml");
assert_that(p.cargo_process("build"), execs().with_status(0));
assert_that(process(&p.bin("foo")), execs().with_status(0));
}
// A package named `syntax` whose lib keeps that standard name is compiled
// exactly once and is linkable from src/main.rs via `extern crate syntax`.
#[test]
fn lib_with_standard_name() {
let p = project("foo")
.file("Cargo.toml", r#"
[package]
name = "syntax"
version = "0.0.1"
authors = []
"#)
.file("src/lib.rs", "
pub fn foo() {}
")
.file("src/main.rs", "
extern crate syntax;
fn main() { syntax::foo() }
");
assert_that(p.cargo_process("build"),
execs().with_status(0)
.with_stderr(&format!("\
[COMPILING] syntax v0.0.1 ({dir})
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]
",
dir = p.url())));
}
// crate-type = ["staticlib"] builds cleanly; setting RUST_LOG is a regression
// test for issue #1381 (it must not affect the build).
#[test]
fn simple_staticlib() {
let p = project("foo")
.file("Cargo.toml", r#"
[package]
name = "foo"
authors = []
version = "0.0.1"
[lib]
name = "foo"
crate-type = ["staticlib"]
"#)
.file("src/lib.rs", "pub fn foo() {}");
// env var is a test for #1381
assert_that(p.cargo_process("build").env("RUST_LOG", "nekoneko=trace"),
execs().with_status(0));
}
// A lib may be both staticlib and rlib at once, with a binary in the same
// package linking against the rlib.
#[test]
fn staticlib_rlib_and_bin() {
let p = project("foo")
.file("Cargo.toml", r#"
[package]
name = "foo"
authors = []
version = "0.0.1"
[lib]
name = "foo"
crate-type = ["staticlib", "rlib"]
"#)
.file("src/lib.rs", "pub fn foo() {}")
.file("src/main.rs", r#"
extern crate foo;
fn main() {
foo::foo();
}"#);
assert_that(p.cargo_process("build").arg("-v"), execs().with_status(0));
}
// A top-level `bin = []` opts out of the inferred binary target, so the
// deliberately broken src/main.rs is never compiled.
#[test]
fn opt_out_of_bin() {
let p = project("foo")
.file("Cargo.toml", r#"
bin = []
[package]
name = "foo"
authors = []
version = "0.0.1"
"#)
.file("src/lib.rs", "")
.file("src/main.rs", "bad syntax");
assert_that(p.cargo_process("build"), execs().with_status(0));
}
// `[lib] path` can point the library target at a non-default source file.
#[test]
fn single_lib() {
let p = project("foo")
.file("Cargo.toml", r#"
[package]
name = "foo"
authors = []
version = "0.0.1"
[lib]
name = "foo"
path = "src/bar.rs"
"#)
.file("src/bar.rs", "");
assert_that(p.cargo_process("build"), execs().with_status(0));
}
#[test]
fn freshness_ignores_excluded() {
let foo = project("foo")
.file("Cargo.toml", r#"
[package]
name = "foo"
version = "0.0.0"
authors = []
build = "build.rs"
exclude = ["src/b*.rs"]
"#)
.file("build.rs", "fn main() {}")
.file("src/lib.rs", "pub fn bar() -> i32 { 1 }");
foo.build();
foo.root().move_into_the_past();
assert_that(foo.cargo("build"),
execs().with_status(0)
.with_stderr(&format!("\
[COMPILING] foo v0.0.0 ({url})
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]
", url = foo.url())));
// Smoke test to make sure it doesn't compile again
println!("first pass");
assert_that(foo.cargo("build"),
execs().with_status(0)
.with_stdout(""));
// Modify an ignored file and make sure we don't rebuild
println!("second pass");
File::create(&foo.root().join("src/bar.rs")).unwrap();
assert_that(foo.cargo("build"),
execs().with_status(0)
.with_stdout(""));
}
// A rebuild must keep the contents of OUT_DIR from previous runs. The first
// build (FIRST set) creates OUT_DIR/foo; the second build's script re-opens
// that same file and panics if OUT_DIR had been wiped between builds.
//
// Bug fix: the original else-branch called `File::create` just like the
// then-branch, which made the test vacuous — it could never detect a wiped
// OUT_DIR. The second run must `File::open` the file created by the first.
#[test]
fn rebuild_preserves_out_dir() {
let foo = project("foo")
.file("Cargo.toml", r#"
[package]
name = "foo"
version = "0.0.0"
authors = []
build = 'build.rs'
"#)
.file("build.rs", r#"
use std::env;
use std::fs::File;
use std::path::Path;
fn main() {
let path = Path::new(&env::var("OUT_DIR").unwrap()).join("foo");
if env::var_os("FIRST").is_some() {
File::create(&path).unwrap();
} else {
File::open(&path).unwrap();
}
}
"#)
.file("src/lib.rs", "pub fn bar() -> i32 { 1 }");
foo.build();
foo.root().move_into_the_past();
assert_that(foo.cargo("build").env("FIRST", "1"),
execs().with_status(0)
.with_stderr(&format!("\
[COMPILING] foo v0.0.0 ({url})
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]
", url = foo.url())));
// Touch a source file so the second build actually re-runs the script.
File::create(&foo.root().join("src/bar.rs")).unwrap();
assert_that(foo.cargo("build"),
execs().with_status(0)
.with_stderr(&format!("\
[COMPILING] foo v0.0.0 ({url})
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]
", url = foo.url())));
}
// A path dependency that provides only a binary (no library target) still
// lets the parent package build successfully.
#[test]
fn dep_no_libs() {
let foo = project("foo")
.file("Cargo.toml", r#"
[package]
name = "foo"
version = "0.0.0"
authors = []
[dependencies.bar]
path = "bar"
"#)
.file("src/lib.rs", "pub fn bar() -> i32 { 1 }")
.file("bar/Cargo.toml", r#"
[package]
name = "bar"
version = "0.0.0"
authors = []
"#)
.file("bar/src/main.rs", "");
assert_that(foo.cargo_process("build"),
execs().with_status(0));
}
// A lib path containing a space compiles, and an immediate rebuild is a
// no-op (empty stdout == nothing recompiled).
#[test]
fn recompile_space_in_name() {
let foo = project("foo")
.file("Cargo.toml", r#"
[package]
name = "foo"
version = "0.0.0"
authors = []
[lib]
name = "foo"
path = "src/my lib.rs"
"#)
.file("src/my lib.rs", "");
assert_that(foo.cargo_process("build"), execs().with_status(0));
foo.root().move_into_the_past();
assert_that(foo.cargo("build"),
execs().with_status(0).with_stdout(""));
}
// An unsearchable directory (mode 0o644, no execute bit) inside the project
// must not break the build; permissions are restored afterwards so the test
// harness can clean up. Unix-only because it relies on POSIX mode bits.
#[cfg(unix)]
#[test]
fn ignore_bad_directories() {
use std::os::unix::prelude::*;
let foo = project("foo")
.file("Cargo.toml", r#"
[package]
name = "foo"
version = "0.0.0"
authors = []
"#)
.file("src/lib.rs", "");
foo.build();
let dir = foo.root().join("tmp");
fs::create_dir(&dir).unwrap();
let stat = fs::metadata(&dir).unwrap();
let mut perms = stat.permissions();
perms.set_mode(0o644);
fs::set_permissions(&dir, perms.clone()).unwrap();
assert_that(foo.cargo("build"),
execs().with_status(0));
perms.set_mode(0o755);
fs::set_permissions(&dir, perms).unwrap();
}
// Invalid TOML in .cargo/config fails the build with the full cause chain
// from "Couldn't load Cargo configuration" down to the TOML parse error.
#[test]
fn bad_cargo_config() {
let foo = project("foo")
.file("Cargo.toml", r#"
[package]
name = "foo"
version = "0.0.0"
authors = []
"#)
.file("src/lib.rs", "")
.file(".cargo/config", r#"
this is not valid toml
"#);
assert_that(foo.cargo_process("build").arg("-v"),
execs().with_status(101).with_stderr("\
[ERROR] Couldn't load Cargo configuration
Caused by:
could not parse TOML configuration in `[..]`
Caused by:
could not parse input as TOML
Caused by:
expected an equals, found an identifier at line 2
"));
}
// [target.<triple>] dependencies, build-dependencies and dev-dependencies
// are all honored when <triple> matches the host: the bin uses `dep`, the
// build script uses `build`, and the integration test uses `dev`.
#[test]
fn cargo_platform_specific_dependency() {
let host = rustc_host();
let p = project("foo")
.file("Cargo.toml", &format!(r#"
[project]
name = "foo"
version = "0.5.0"
authors = ["[email protected]"]
build = "build.rs"
[target.{host}.dependencies]
dep = {{ path = "dep" }}
[target.{host}.build-dependencies]
build = {{ path = "build" }}
[target.{host}.dev-dependencies]
dev = {{ path = "dev" }}
"#, host = host))
.file("src/main.rs", r#"
extern crate dep;
fn main() { dep::dep() }
"#)
.file("tests/foo.rs", r#"
extern crate dev;
#[test]
fn foo() { dev::dev() }
"#)
.file("build.rs", r#"
extern crate build;
fn main() { build::build(); }
"#)
.file("dep/Cargo.toml", r#"
[project]
name = "dep"
version = "0.5.0"
authors = ["[email protected]"]
"#)
.file("dep/src/lib.rs", "pub fn dep() {}")
.file("build/Cargo.toml", r#"
[project]
name = "build"
version = "0.5.0"
authors = ["[email protected]"]
"#)
.file("build/src/lib.rs", "pub fn build() {}")
.file("dev/Cargo.toml", r#"
[project]
name = "dev"
version = "0.5.0"
authors = ["[email protected]"]
"#)
.file("dev/src/lib.rs", "pub fn dev() {}");
assert_that(p.cargo_process("build"),
execs().with_status(0));
assert_that(&p.bin("foo"), existing_file());
assert_that(p.cargo("test"),
execs().with_status(0));
}
// A dependency declared under a target that never matches ("wrong-target")
// is not available, so src/main.rs's use of `bar` fails the build.
#[test]
fn bad_platform_specific_dependency() {
let p = project("foo")
.file("Cargo.toml", r#"
[project]
name = "foo"
version = "0.5.0"
authors = ["[email protected]"]
[target.wrong-target.dependencies.bar]
path = "bar"
"#)
.file("src/main.rs",
&main_file(r#""{}", bar::gimme()"#, &["bar"]))
.file("bar/Cargo.toml", r#"
[project]
name = "bar"
version = "0.5.0"
authors = ["[email protected]"]
"#)
.file("bar/src/lib.rs", r#"
extern crate baz;
pub fn gimme() -> String {
format!("")
}
"#);
assert_that(p.cargo_process("build"),
execs().with_status(101));
}
// Deps for a non-matching triplet are never compiled (bar's source is
// deliberately invalid) but are still recorded in Cargo.lock.
#[test]
fn cargo_platform_specific_dependency_wrong_platform() {
let p = project("foo")
.file("Cargo.toml", r#"
[project]
name = "foo"
version = "0.5.0"
authors = ["[email protected]"]
[target.non-existing-triplet.dependencies.bar]
path = "bar"
"#)
.file("src/main.rs", r#"
fn main() {}
"#)
.file("bar/Cargo.toml", r#"
[project]
name = "bar"
version = "0.5.0"
authors = ["[email protected]"]
"#)
.file("bar/src/lib.rs", r#"
invalid rust file, should not be compiled
"#);
p.cargo_process("build").exec_with_output().unwrap();
assert_that(&p.bin("foo"), existing_file());
assert_that(process(&p.bin("foo")),
execs().with_status(0));
let loc = p.root().join("Cargo.lock");
let mut lockfile = String::new();
File::open(&loc).unwrap().read_to_string(&mut lockfile).unwrap();
assert!(lockfile.contains("bar"))
}
// An [[example]] with crate-type = ["lib"] builds as a library artifact.
#[test]
fn example_as_lib() {
let p = project("foo")
.file("Cargo.toml", r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
[[example]]
name = "ex"
crate-type = ["lib"]
"#)
.file("src/lib.rs", "")
.file("examples/ex.rs", "");
assert_that(p.cargo_process("build").arg("--example=ex"), execs().with_status(0));
assert_that(&p.example_lib("ex", "lib"), existing_file());
}
// Same as above but for crate-type = ["rlib"].
#[test]
fn example_as_rlib() {
let p = project("foo")
.file("Cargo.toml", r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
[[example]]
name = "ex"
crate-type = ["rlib"]
"#)
.file("src/lib.rs", "")
.file("examples/ex.rs", "");
assert_that(p.cargo_process("build").arg("--example=ex"), execs().with_status(0));
assert_that(&p.example_lib("ex", "rlib"), existing_file());
}
// Same as above but for crate-type = ["dylib"].
#[test]
fn example_as_dylib() {
let p = project("foo")
.file("Cargo.toml", r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
[[example]]
name = "ex"
crate-type = ["dylib"]
"#)
.file("src/lib.rs", "")
.file("examples/ex.rs", "");
assert_that(p.cargo_process("build").arg("--example=ex"), execs().with_status(0));
assert_that(&p.example_lib("ex", "dylib"), existing_file());
}
// Same as above but for crate-type = ["proc-macro"]; requires a nightly
// toolchain (the example opts into #![feature(proc_macro)]).
#[test]
fn example_as_proc_macro() {
if !is_nightly() {
return;
}
let p = project("foo")
.file("Cargo.toml", r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
[[example]]
name = "ex"
crate-type = ["proc-macro"]
"#)
.file("src/lib.rs", "")
.file("examples/ex.rs", "#![feature(proc_macro)]");
assert_that(p.cargo_process("build").arg("--example=ex"), execs().with_status(0));
assert_that(&p.example_lib("ex", "proc-macro"), existing_file());
}
// When an example and the package binary share a name, `cargo test --no-run`
// must keep the example artifact distinct from the (hashed) test binary, on
// both the first and a repeated run.
#[test]
fn example_bin_same_name() {
let p = project("foo")
.file("Cargo.toml", r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
"#)
.file("src/main.rs", "fn main() {}")
.file("examples/foo.rs", "fn main() {}");
p.cargo_process("test").arg("--no-run").arg("-v")
.exec_with_output()
.unwrap();
assert_that(&p.bin("foo"), is_not(existing_file()));
// We expect a file of the form bin/foo-{metadata_hash}
assert_that(&p.bin("examples/foo"), existing_file());
p.cargo("test").arg("--no-run").arg("-v")
.exec_with_output()
.unwrap();
assert_that(&p.bin("foo"), is_not(existing_file()));
// We expect a file of the form bin/foo-{metadata_hash}
assert_that(&p.bin("examples/foo"), existing_file());
}
// `cargo run` re-links the binary if it was deleted after a previous run.
#[test]
fn compile_then_delete() {
let p = project("foo")
.file("Cargo.toml", r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
"#)
.file("src/main.rs", "fn main() {}");
assert_that(p.cargo_process("run").arg("-v"), execs().with_status(0));
assert_that(&p.bin("foo"), existing_file());
if cfg!(windows) {
// On windows unlinking immediately after running often fails, so sleep
sleep_ms(100);
}
fs::remove_file(&p.bin("foo")).unwrap();
assert_that(p.cargo("run").arg("-v"),
execs().with_status(0));
}
// Transitive dependencies are not directly linkable: foo depends on aaaaa
// which depends on bbbbb, so foo's own `extern crate bbbbb` must fail with
// "can't find crate".
#[test]
fn transitive_dependencies_not_available() {
let p = project("foo")
.file("Cargo.toml", r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
[dependencies.aaaaa]
path = "a"
"#)
.file("src/main.rs", "extern crate bbbbb; extern crate aaaaa; fn main() {}")
.file("a/Cargo.toml", r#"
[package]
name = "aaaaa"
version = "0.0.1"
authors = []
[dependencies.bbbbb]
path = "../b"
"#)
.file("a/src/lib.rs", "extern crate bbbbb;")
.file("b/Cargo.toml", r#"
[package]
name = "bbbbb"
version = "0.0.1"
authors = []
"#)
.file("b/src/lib.rs", "");
assert_that(p.cargo_process("build").arg("-v"),
execs().with_status(101)
.with_stderr_contains("\
[..] can't find crate for `bbbbb`[..]
"));
}
// A dependency cycle (foo -> a -> foo) is rejected with a clear error.
#[test]
fn cyclic_deps_rejected() {
let p = project("foo")
.file("Cargo.toml", r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
[dependencies.a]
path = "a"
"#)
.file("src/lib.rs", "")
.file("a/Cargo.toml", r#"
[package]
name = "a"
version = "0.0.1"
authors = []
[dependencies.foo]
path = ".."
"#)
.file("a/src/lib.rs", "");
assert_that(p.cargo_process("build").arg("-v"),
execs().with_status(101)
.with_stderr("\
[ERROR] cyclic package dependency: package `a v0.0.1 ([..])` depends on itself
"));
}
// dylib + rlib crate types produce libfoo.rlib plus the platform-specific
// dylib name (DLL_PREFIX/DLL_SUFFIX) directly under target/debug.
#[test]
fn predictable_filenames() {
let p = project("foo")
.file("Cargo.toml", r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
[lib]
name = "foo"
crate-type = ["dylib", "rlib"]
"#)
.file("src/lib.rs", "");
assert_that(p.cargo_process("build").arg("-v"),
execs().with_status(0));
assert_that(&p.root().join("target/debug/libfoo.rlib"), existing_file());
let dylib_name = format!("{}foo{}", env::consts::DLL_PREFIX,
env::consts::DLL_SUFFIX);
assert_that(&p.root().join("target/debug").join(dylib_name),
existing_file());
}
// Package `foo-bar` is linked as crate `foo_bar`, while the produced binary
// keeps the dashed package name.
#[test]
fn dashes_to_underscores() {
let p = project("foo")
.file("Cargo.toml", r#"
[package]
name = "foo-bar"
version = "0.0.1"
authors = []
"#)
.file("src/lib.rs", "")
.file("src/main.rs", "extern crate foo_bar; fn main() {}");
assert_that(p.cargo_process("build").arg("-v"),
execs().with_status(0));
assert_that(&p.bin("foo-bar"), existing_file());
}
// An explicit [lib] name containing a dash is not a valid crate name and
// must be rejected.
#[test]
fn dashes_in_crate_name_bad() {
let p = project("foo")
.file("Cargo.toml", r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
[lib]
name = "foo-bar"
"#)
.file("src/lib.rs", "")
.file("src/main.rs", "extern crate foo_bar; fn main() {}");
assert_that(p.cargo_process("build").arg("-v"),
execs().with_status(101));
}
// The RUSTC env var overrides the compiler; pointing it at a nonexistent
// program fails with a "could not execute process" error and no artifacts.
#[test]
fn rustc_env_var() {
let p = project("foo")
.file("Cargo.toml", r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
"#)
.file("src/lib.rs", "");
p.build();
assert_that(p.cargo("build")
.env("RUSTC", "rustc-that-does-not-exist").arg("-v"),
execs().with_status(101)
.with_stderr("\
[ERROR] could not execute process `rustc-that-does-not-exist -vV` ([..])
Caused by:
[..]
"));
assert_that(&p.bin("a"), is_not(existing_file()));
}
// Target filtering flags: `--lib` builds only the library, and explicit
// `--bin=a --example=a` build exactly those targets and nothing else.
#[test]
fn filtering() {
let p = project("foo")
.file("Cargo.toml", r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
"#)
.file("src/lib.rs", "")
.file("src/bin/a.rs", "fn main() {}")
.file("src/bin/b.rs", "fn main() {}")
.file("examples/a.rs", "fn main() {}")
.file("examples/b.rs", "fn main() {}");
p.build();
assert_that(p.cargo("build").arg("--lib"),
execs().with_status(0));
assert_that(&p.bin("a"), is_not(existing_file()));
assert_that(p.cargo("build").arg("--bin=a").arg("--example=a"),
execs().with_status(0));
assert_that(&p.bin("a"), existing_file());
assert_that(&p.bin("b"), is_not(existing_file()));
assert_that(&p.bin("examples/a"), existing_file());
assert_that(&p.bin("examples/b"), is_not(existing_file()));
}
#[test]
fn filtering_implicit_bins() {
let p = project("foo")
.file("Cargo.toml", r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
"#)
.file("src/lib.rs", "")
.file("src/bin/a.rs", "fn main() {}")
.file("src/bin/b.rs", "fn main() {}")
.file("examples/a.rs", "fn main() {}")
.file("examples/b.rs", "fn main() {}");
p.build();
assert_that(p.cargo("build").arg("--bins"),
execs().with_status(0));
assert_that(&p.bin("a"), existing_file());
assert_that(&p.bin("b"), existing_file());
assert_that(&p.bin("examples/a"), is_not(existing_file()));
assert_that(&p.bin("examples/b"), is_not(existing_file()));
}
#[test]
fn filtering_implicit_examples() {
let p = project("foo")
.file("Cargo.toml", r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
"#)
.file("src/lib.rs", "")
.file("src/bin/a.rs", "fn main() {}")
.file("src/bin/b.rs", "fn main() {}")
.file("examples/a.rs", "fn main() {}")
.file("examples/b.rs", "fn main() {}");
p.build();
assert_that(p.cargo("build").arg("--examples"),
execs().with_status(0));
assert_that(&p.bin("a"), is_not(existing_file()));
assert_that(&p.bin("b"), is_not(existing_file()));
assert_that(&p.bin("examples/a"), existing_file());
assert_that(&p.bin("examples/b"), existing_file());
}
// A dotfile in src/bin (".a.rs") is not treated as a binary target.
#[test]
fn ignore_dotfile() {
let p = project("foo")
.file("Cargo.toml", r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
"#)
.file("src/bin/.a.rs", "")
.file("src/bin/a.rs", "fn main() {}");
p.build();
assert_that(p.cargo("build"),
execs().with_status(0));
}
// Manifests inside dot-directories (.git, .pc) are ignored during discovery.
#[test]
fn ignore_dotdirs() {
let p = project("foo")
.file("Cargo.toml", r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
"#)
.file("src/bin/a.rs", "fn main() {}")
.file(".git/Cargo.toml", "")
.file(".pc/dummy-fix.patch/Cargo.toml", "");
p.build();
assert_that(p.cargo("build"),
execs().with_status(0));
}
// A project whose root directory itself starts with a dot still builds.
#[test]
fn dotdir_root() {
let p = ProjectBuilder::new("foo", root().join(".foo"))
.file("Cargo.toml", r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
"#)
.file("src/bin/a.rs", "fn main() {}");
p.build();
assert_that(p.cargo("build"),
execs().with_status(0));
}
// CARGO_TARGET_DIR relocates build artifacts; the `build.target-dir` config
// key does the same, and the env var takes precedence over the config.
#[test]
fn custom_target_dir() {
let p = project("foo")
.file("Cargo.toml", r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
"#)
.file("src/main.rs", "fn main() {}");
p.build();
let exe_name = format!("foo{}", env::consts::EXE_SUFFIX);
// 1) env var only: artifact goes to foo/target, not target/.
assert_that(p.cargo("build").env("CARGO_TARGET_DIR", "foo/target"),
execs().with_status(0));
assert_that(&p.root().join("foo/target/debug").join(&exe_name),
existing_file());
assert_that(&p.root().join("target/debug").join(&exe_name),
is_not(existing_file()));
// 2) no override: default target/ is used (foo/target remains).
assert_that(p.cargo("build"),
execs().with_status(0));
assert_that(&p.root().join("foo/target/debug").join(&exe_name),
existing_file());
assert_that(&p.root().join("target/debug").join(&exe_name),
existing_file());
// 3) config says foo/target but the env var (bar/target) wins.
fs::create_dir(p.root().join(".cargo")).unwrap();
File::create(p.root().join(".cargo/config")).unwrap().write_all(br#"
[build]
target-dir = "foo/target"
"#).unwrap();
assert_that(p.cargo("build").env("CARGO_TARGET_DIR", "bar/target"),
execs().with_status(0));
assert_that(&p.root().join("bar/target/debug").join(&exe_name),
existing_file());
assert_that(&p.root().join("foo/target/debug").join(&exe_name),
existing_file());
assert_that(&p.root().join("target/debug").join(&exe_name),
existing_file());
}
// `cargo rustc -- -Zno-trans` forwards the extra flag to rustc successfully.
#[test]
fn rustc_no_trans() {
let p = project("foo")
.file("Cargo.toml", r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
"#)
.file("src/main.rs", "fn main() {}");
p.build();
assert_that(p.cargo("rustc").arg("-v").arg("--").arg("-Zno-trans"),
execs().with_status(0));
}
// `cargo build -p d1 -p d2 -p foo` builds all three packages' binaries, and
// each produced executable prints its own name.
#[test]
fn build_multiple_packages() {
let p = project("foo")
.file("Cargo.toml", r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
[dependencies.d1]
path = "d1"
[dependencies.d2]
path = "d2"
[[bin]]
name = "foo"
"#)
.file("src/foo.rs", &main_file(r#""i am foo""#, &[]))
.file("d1/Cargo.toml", r#"
[package]
name = "d1"
version = "0.0.1"
authors = []
[[bin]]
name = "d1"
"#)
.file("d1/src/lib.rs", "")
.file("d1/src/main.rs", "fn main() { println!(\"d1\"); }")
.file("d2/Cargo.toml", r#"
[package]
name = "d2"
version = "0.0.1"
authors = []
[[bin]]
name = "d2"
doctest = false
"#)
.file("d2/src/main.rs", "fn main() { println!(\"d2\"); }");
assert_that(p.cargo_process("build").arg("-p").arg("d1").arg("-p").arg("d2")
.arg("-p").arg("foo"),
execs().with_status(0));
assert_that(&p.bin("foo"), existing_file());
assert_that(process(&p.bin("foo")),
execs().with_status(0).with_stdout("i am foo\n"));
let d1_path = &p.build_dir().join("debug")
.join(format!("d1{}", env::consts::EXE_SUFFIX));
let d2_path = &p.build_dir().join("debug")
.join(format!("d2{}", env::consts::EXE_SUFFIX));
assert_that(d1_path, existing_file());
assert_that(process(d1_path), execs().with_status(0).with_stdout("d1"));
assert_that(d2_path, existing_file());
assert_that(process(d2_path),
execs().with_status(0).with_stdout("d2"));
}
// `-p` with an unknown package spec errors out, both alone and when mixed
// with a valid spec.
#[test]
fn invalid_spec() {
let p = project("foo")
.file("Cargo.toml", r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
[dependencies.d1]
path = "d1"
[[bin]]
name = "foo"
"#)
.file("src/foo.rs", &main_file(r#""i am foo""#, &[]))
.file("d1/Cargo.toml", r#"
[package]
name = "d1"
version = "0.0.1"
authors = []
[[bin]]
name = "d1"
"#)
.file("d1/src/lib.rs", "")
.file("d1/src/main.rs", "fn main() { println!(\"d1\"); }");
p.build();
assert_that(p.cargo("build").arg("-p").arg("notAValidDep"),
execs().with_status(101).with_stderr("\
[ERROR] package id specification `notAValidDep` matched no packages
"));
assert_that(p.cargo("build").arg("-p").arg("d1").arg("-p").arg("notAValidDep"),
execs().with_status(101).with_stderr("\
[ERROR] package id specification `notAValidDep` matched no packages
"));
}
// A UTF-8 BOM (\u{FEFF}) at the start of Cargo.toml is tolerated.
#[test]
fn manifest_with_bom_is_ok() {
let p = project("foo")
.file("Cargo.toml", "\u{FEFF}
[package]
name = \"foo\"
version = \"0.0.1\"
authors = []
")
.file("src/lib.rs", "");
assert_that(p.cargo_process("build").arg("-v"),
execs().with_status(0));
}
// `panic = 'abort'` in [profile.dev] passes `-C panic=abort` to rustc.
#[test]
fn panic_abort_compiles_with_panic_abort() {
let p = project("foo")
.file("Cargo.toml", r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
[profile.dev]
panic = 'abort'
"#)
.file("src/lib.rs", "");
assert_that(p.cargo_process("build").arg("-v"),
execs().with_status(0)
.with_stderr_contains("[..] -C panic=abort [..]"));
}
// `--color always` / `--color never` are forwarded to rustc verbatim.
#[test]
fn explicit_color_config_is_propagated_to_rustc() {
let p = project("foo")
.file("Cargo.toml", r#"
[package]
name = "test"
version = "0.0.0"
authors = []
"#)
.file("src/lib.rs", "");
p.build();
assert_that(p.cargo("build").arg("-v").arg("--color").arg("always"),
execs().with_status(0).with_stderr_contains(
"[..]rustc [..] src[/]lib.rs --color always[..]"));
assert_that(p.cargo("clean"), execs().with_status(0));
assert_that(p.cargo("build").arg("-v").arg("--color").arg("never"),
execs().with_status(0).with_stderr("\
[COMPILING] test v0.0.0 ([..])
[RUNNING] `rustc [..] --color never [..]`
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]
"));
}
// `--message-format json` emits, per crate, a compiler-message record (both
// lib.rs and main.rs contain code that triggers warnings) followed by a
// compiler-artifact record with "fresh": false. On a second, fully fresh
// build the artifacts are repeated with "fresh": true and the warnings are
// omitted.
#[test]
fn compiler_json_error_format() {
let p = project("foo")
.file("Cargo.toml", r#"
[project]
name = "foo"
version = "0.5.0"
authors = ["[email protected]"]
[dependencies.bar]
path = "bar"
"#)
.file("src/main.rs", "fn main() { let unused = 92; }")
.file("bar/Cargo.toml", r#"
[project]
name = "bar"
version = "0.5.0"
authors = ["[email protected]"]
"#)
.file("bar/src/lib.rs", r#"fn dead() {}"#);
p.build();
assert_that(p.cargo("build").arg("-v")
.arg("--message-format").arg("json"),
execs().with_status(0).with_json(r#"
{
"reason":"compiler-message",
"package_id":"bar 0.5.0 ([..])",
"target":{
"kind":["lib"],
"crate_types":["lib"],
"name":"bar",
"src_path":"[..]lib.rs"
},
"message":"{...}"
}
{
"reason":"compiler-artifact",
"profile": {
"debug_assertions": true,
"debuginfo": 2,
"opt_level": "0",
"overflow_checks": true,
"test": false
},
"features": [],
"package_id":"bar 0.5.0 ([..])",
"target":{
"kind":["lib"],
"crate_types":["lib"],
"name":"bar",
"src_path":"[..]lib.rs"
},
"filenames":["[..].rlib"],
"fresh": false
}
{
"reason":"compiler-message",
"package_id":"foo 0.5.0 ([..])",
"target":{
"kind":["bin"],
"crate_types":["bin"],
"name":"foo",
"src_path":"[..]main.rs"
},
"message":"{...}"
}
{
"reason":"compiler-artifact",
"package_id":"foo 0.5.0 ([..])",
"target":{
"kind":["bin"],
"crate_types":["bin"],
"name":"foo",
"src_path":"[..]main.rs"
},
"profile": {
"debug_assertions": true,
"debuginfo": 2,
"opt_level": "0",
"overflow_checks": true,
"test": false
},
"features": [],
"filenames": ["[..]"],
"fresh": false
}
"#));
// With fresh build, we should repeat the artifacts,
// but omit compiler warnings.
assert_that(p.cargo("build").arg("-v")
.arg("--message-format").arg("json"),
execs().with_status(0).with_json(r#"
{
"reason":"compiler-artifact",
"profile": {
"debug_assertions": true,
"debuginfo": 2,
"opt_level": "0",
"overflow_checks": true,
"test": false
},
"features": [],
"package_id":"bar 0.5.0 ([..])",
"target":{
"kind":["lib"],
"crate_types":["lib"],
"name":"bar",
"src_path":"[..]lib.rs"
},
"filenames":["[..].rlib"],
"fresh": true
}
{
"reason":"compiler-artifact",
"package_id":"foo 0.5.0 ([..])",
"target":{
"kind":["bin"],
"crate_types":["bin"],
"name":"foo",
"src_path":"[..]main.rs"
},
"profile": {
"debug_assertions": true,
"debuginfo": 2,
"opt_level": "0",
"overflow_checks": true,
"test": false
},
"features": [],
"filenames": ["[..]"],
"fresh": true
}
"#));
}
// An unknown --message-format value is rejected (case-insensitively: "XML"
// is reported back lowercased as 'xml') with the list of valid variants.
#[test]
fn wrong_message_format_option() {
let p = project("foo")
.file("Cargo.toml", &basic_bin_manifest("foo"))
.file("src/main.rs", "fn main() {}");
assert_that(p.cargo_process("build").arg("--message-format").arg("XML"),
execs().with_status(1)
.with_stderr_contains(
r#"[ERROR] Could not match 'xml' with any of the allowed variants: ["Human", "Json"]"#));
}
// With JSON message format, diagnostics go into the JSON stream while other
// rustc stderr output (here the unstable-option warning for -Z) is still
// forwarded to stderr. Skipped on nightly, where the -Z usage differs —
// NOTE(review): the exact nightly behavior isn't visible here; confirm.
#[test]
fn message_format_json_forward_stderr() {
if is_nightly() { return }
let p = project("foo")
.file("Cargo.toml", &basic_bin_manifest("foo"))
.file("src/main.rs", "fn main() { let unused = 0; }");
assert_that(p.cargo_process("rustc").arg("--bin").arg("foo")
.arg("--message-format").arg("JSON").arg("--").arg("-Zno-trans"),
execs().with_status(0)
.with_stderr_contains("[WARNING] the option `Z` is unstable [..]")
.with_json(r#"
{
"reason":"compiler-message",
"package_id":"foo 0.5.0 ([..])",
"target":{
"kind":["bin"],
"crate_types":["bin"],
"name":"foo",
"src_path":"[..]"
},
"message":"{...}"
}
{
"reason":"compiler-artifact",
"package_id":"foo 0.5.0 ([..])",
"target":{
"kind":["bin"],
"crate_types":["bin"],
"name":"foo",
"src_path":"[..]"
},
"profile":{
"debug_assertions":true,
"debuginfo":2,
"opt_level":"0",
"overflow_checks": true,
"test":false
},
"features":[],
"filenames":[],
"fresh": false
}
"#));
}
#[test]
fn no_warn_about_package_metadata() {
let p = project("foo")
.file("Cargo.toml", r#"
[package]
name = "foo"
version = "0.0.1"
authors = []
[package.metadata]
foo = "bar"
a = true
b = 3
[package.metadata.another]
bar = 3
"#)
.file("src/lib.rs", "");
assert_that(p.cargo_process("build"),
execs().with_status(0)
.with_stderr("[..] foo v0.0.1 ([..])\n\
[FINISHED] dev [unoptimized + debuginfo] target(s) in [..]\n"));
}
#[test]
fn cargo_build_empty_target() {
let p = project("foo")
.file("Cargo.toml", &basic_bin_manifest("foo"))
.file("src/main.rs", "fn main() {}");
assert_that(p.cargo_process("build").arg("--target").arg(""),
execs().with_status(101)
.with_stderr_contains("[..] target was empty"));
}
#[test]
fn build_all_workspace() {
let p = project("foo")
.file("Cargo.toml", r#"
[project]
name = "foo"
version = "0.1.0"
[dependencies]
bar = { path = "bar" }
[workspace]
"#)
.file("src/main.rs", r#"
fn main() {}
"#)
.file("bar/Cargo.toml", r#"
[project]
name = "bar"
version = "0.1.0"
"#)
.file("bar/src/lib.rs", r#"
pub fn bar() {}
"#);
assert_that(p.cargo_process("build")
.arg("--all"),
execs().with_status(0)
.with_stderr("[..] Compiling bar v0.1.0 ([..])\n\
[..] Compiling foo v0.1.0 ([..])\n\
[..] Finished dev [unoptimized + debuginfo] target(s) in [..]\n"));
}
// `--all --examples` in a workspace builds every member's examples and none
// of their binaries.
#[test]
fn build_all_workspace_implicit_examples() {
let p = project("foo")
.file("Cargo.toml", r#"
[project]
name = "foo"
version = "0.1.0"
[dependencies]
bar = { path = "bar" }
[workspace]
"#)
.file("src/lib.rs", "")
.file("src/bin/a.rs", "fn main() {}")
.file("src/bin/b.rs", "fn main() {}")
.file("examples/c.rs", "fn main() {}")
.file("examples/d.rs", "fn main() {}")
.file("bar/Cargo.toml", r#"
[project]
name = "bar"
version = "0.1.0"
"#)
.file("bar/src/lib.rs", "")
.file("bar/src/bin/e.rs", "fn main() {}")
.file("bar/src/bin/f.rs", "fn main() {}")
.file("bar/examples/g.rs", "fn main() {}")
.file("bar/examples/h.rs", "fn main() {}");
assert_that(p.cargo_process("build")
.arg("--all").arg("--examples"),
execs().with_status(0)
.with_stderr("[..] Compiling bar v0.1.0 ([..])\n\
[..] Compiling foo v0.1.0 ([..])\n\
[..] Finished dev [unoptimized + debuginfo] target(s) in [..]\n"));
assert_that(&p.bin("a"), is_not(existing_file()));
assert_that(&p.bin("b"), is_not(existing_file()));
assert_that(&p.bin("examples/c"), existing_file());
assert_that(&p.bin("examples/d"), existing_file());
assert_that(&p.bin("e"), is_not(existing_file()));
assert_that(&p.bin("f"), is_not(existing_file()));
assert_that(&p.bin("examples/g"), existing_file());
assert_that(&p.bin("examples/h"), existing_file());
}
// `--all` on a virtual manifest (no root package) builds every member; the
// stderr pattern is order-agnostic because build order isn't guaranteed.
#[test]
fn build_all_virtual_manifest() {
let p = project("workspace")
.file("Cargo.toml", r#"
[workspace]
members = ["foo", "bar"]
"#)
.file("foo/Cargo.toml", r#"
[project]
name = "foo"
version = "0.1.0"
"#)
.file("foo/src/lib.rs", r#"
pub fn foo() {}
"#)
.file("bar/Cargo.toml", r#"
[project]
name = "bar"
version = "0.1.0"
"#)
.file("bar/src/lib.rs", r#"
pub fn bar() {}
"#);
// The order in which foo and bar are built is not guaranteed
assert_that(p.cargo_process("build")
.arg("--all"),
execs().with_status(0)
.with_stderr_contains("[..] Compiling bar v0.1.0 ([..])")
.with_stderr_contains("[..] Compiling foo v0.1.0 ([..])")
.with_stderr("[..] Compiling [..] v0.1.0 ([..])\n\
[..] Compiling [..] v0.1.0 ([..])\n\
[..] Finished dev [unoptimized + debuginfo] target(s) in [..]\n"));
}
// `--all --examples` on a virtual manifest builds only the examples of every
// member, in unspecified order.
#[test]
fn build_all_virtual_manifest_implicit_examples() {
let p = project("foo")
.file("Cargo.toml", r#"
[workspace]
members = ["foo", "bar"]
"#)
.file("foo/Cargo.toml", r#"
[project]
name = "foo"
version = "0.1.0"
"#)
.file("foo/src/lib.rs", "")
.file("foo/src/bin/a.rs", "fn main() {}")
.file("foo/src/bin/b.rs", "fn main() {}")
.file("foo/examples/c.rs", "fn main() {}")
.file("foo/examples/d.rs", "fn main() {}")
.file("bar/Cargo.toml", r#"
[project]
name = "bar"
version = "0.1.0"
"#)
.file("bar/src/lib.rs", "")
.file("bar/src/bin/e.rs", "fn main() {}")
.file("bar/src/bin/f.rs", "fn main() {}")
.file("bar/examples/g.rs", "fn main() {}")
.file("bar/examples/h.rs", "fn main() {}");
// The order in which foo and bar are built is not guaranteed
assert_that(p.cargo_process("build")
.arg("--all").arg("--examples"),
execs().with_status(0)
.with_stderr_contains("[..] Compiling bar v0.1.0 ([..])")
.with_stderr_contains("[..] Compiling foo v0.1.0 ([..])")
.with_stderr("[..] Compiling [..] v0.1.0 ([..])\n\
[..] Compiling [..] v0.1.0 ([..])\n\
[..] Finished dev [unoptimized + debuginfo] target(s) in [..]\n"));
assert_that(&p.bin("a"), is_not(existing_file()));
assert_that(&p.bin("b"), is_not(existing_file()));
assert_that(&p.bin("examples/c"), existing_file());
assert_that(&p.bin("examples/d"), existing_file());
assert_that(&p.bin("e"), is_not(existing_file()));
assert_that(&p.bin("f"), is_not(existing_file()));
assert_that(&p.bin("examples/g"), existing_file());
assert_that(&p.bin("examples/h"), existing_file());
}
// A workspace member `a` may depend on a registry crate that is also named
// `a`; `--all` builds both without conflating them.
#[test]
fn build_all_member_dependency_same_name() {
let p = project("workspace")
.file("Cargo.toml", r#"
[workspace]
members = ["a"]
"#)
.file("a/Cargo.toml", r#"
[project]
name = "a"
version = "0.1.0"
[dependencies]
a = "0.1.0"
"#)
.file("a/src/lib.rs", r#"
pub fn a() {}
"#);
Package::new("a", "0.1.0").publish();
assert_that(p.cargo_process("build")
.arg("--all"),
execs().with_status(0)
.with_stderr("[..] Updating registry `[..]`\n\
[..] Downloading a v0.1.0 ([..])\n\
[..] Compiling a v0.1.0\n\
[..] Compiling a v0.1.0 ([..])\n\
[..] Finished dev [unoptimized + debuginfo] target(s) in [..]\n"));
}
// `cargo run --bin other` runs only the requested binary; the sibling `main`
// binary panics if it is ever executed, so success proves the selection.
#[test]
fn run_proper_binary() {
let p = project("foo")
.file("Cargo.toml", r#"
[package]
name = "foo"
authors = []
version = "0.0.0"
[[bin]]
name = "main"
[[bin]]
name = "other"
"#)
.file("src/lib.rs", "")
.file("src/bin/main.rs", r#"
fn main() {
panic!("This should never be run.");
}
"#)
.file("src/bin/other.rs", r#"
fn main() {
}
"#);
assert_that(p.cargo_process("run").arg("--bin").arg("other"),
execs().with_status(0));
}
// src/bin/main.rs provides the source for the binary declared as `foo`, and
// `cargo run --bin foo` resolves to it.
#[test]
fn run_proper_binary_main_rs() {
let p = project("foo")
.file("Cargo.toml", r#"
[package]
name = "foo"
authors = []
version = "0.0.0"
[[bin]]
name = "foo"
"#)
.file("src/lib.rs", "")
.file("src/bin/main.rs", r#"
fn main() {
}
"#);
assert_that(p.cargo_process("run").arg("--bin").arg("foo"),
execs().with_status(0));
}
// When both src/foo.rs and src/main.rs exist, the binary `foo` comes from
// src/main.rs — src/foo.rs panics if it is ever run.
#[test]
fn run_proper_binary_main_rs_as_foo() {
let p = project("foo")
.file("Cargo.toml", r#"
[package]
name = "foo"
authors = []
version = "0.0.0"
[[bin]]
name = "foo"
"#)
.file("src/foo.rs", r#"
fn main() {
panic!("This should never be run.");
}
"#)
.file("src/main.rs", r#"
fn main() {
}
"#);
assert_that(p.cargo_process("run").arg("--bin").arg("foo"),
execs().with_status(0));
}
// RUSTC_WRAPPER must be invoked in place of rustc (with rustc as its first
// argument); verify the wrapper shows up in the verbose build output.
#[test]
fn rustc_wrapper() {
    // We don't have /usr/bin/env on Windows.
    if cfg!(windows) { return }
    let p = project("foo")
        .file("Cargo.toml", &basic_bin_manifest("foo"))
        .file("src/foo.rs", &main_file(r#""i am foo""#, &[]));
    assert_that(p.cargo_process("build").arg("-v").env("RUSTC_WRAPPER", "/usr/bin/env"),
                execs().with_stderr_contains(
                    "[RUNNING] `/usr/bin/env rustc --crate-name foo [..]")
                    .with_status(0));
}
// cdylib artifacts should *not* be lifted out of target/debug/deps into
// target/debug; assert the platform-specific artifact names exist in deps.
#[test]
fn cdylib_not_lifted() {
    let p = project("foo")
        .file("Cargo.toml", r#"
            [project]
            name = "foo"
            authors = []
            version = "0.1.0"
            [lib]
            crate-type = ["cdylib"]
        "#)
        .file("src/lib.rs", "");
    assert_that(p.cargo_process("build"), execs().with_status(0));
    // Expected artifact names differ per platform/toolchain.
    let files = if cfg!(windows) {
        vec!["foo.dll.lib", "foo.dll.exp", "foo.dll"]
    } else if cfg!(target_os = "macos") {
        vec!["libfoo.dylib"]
    } else {
        vec!["libfoo.so"]
    };
    for file in files {
        println!("checking: {}", file);
        assert_that(&p.root().join("target/debug/deps").join(&file),
                    existing_file());
    }
}
| 27.794681 | 93 | 0.471415 |
166813883f4e01b084af0dc663591b18eb79eef7
| 862 |
extern crate rand;
use std::io;
use std::cmp::Ordering;
use rand::Rng;
/// The classic guessing game: pick a secret in [1, 100], then repeatedly
/// read guesses from stdin until the player hits the secret number.
fn main() {
    println!("Guess the number!");
    // `gen_range(low, high)` is half-open, so this yields 1..=100.
    let secret_number = rand::thread_rng().gen_range(1, 101);
    println!("The secret number is: {}", secret_number);
    loop {
        println!("Please input your guess.");
        let mut input = String::new();
        io::stdin()
            .read_line(&mut input)
            .expect("Failed to read line");
        // Non-numeric input is silently ignored; just prompt again.
        let guess: u32 = match input.trim().parse() {
            Ok(n) => n,
            Err(_) => continue,
        };
        println!("You guessed: {}", guess);
        match guess.cmp(&secret_number) {
            Ordering::Less => println!("Too small!"),
            Ordering::Greater => println!("Too big!"),
            Ordering::Equal => {
                println!("You win!");
                break;
            }
        }
    }
}
| 23.944444 | 72 | 0.508121 |
56f0e667f2f3547ec4e1c1eac2bb57ef76d2b202
| 2,750 |
use crate::{
config::{DataType, SinkConfig, SinkContext, SinkDescription},
event::log_schema,
sinks::util::{
encoding::{EncodingConfig, EncodingConfiguration},
tcp::TcpSink,
Encoding, UriSerde,
},
tls::{MaybeTlsSettings, TlsSettings},
};
use bytes::Bytes;
use futures01::{stream::iter_ok, Sink};
use serde::{Deserialize, Serialize};
use syslog::{Facility, Formatter3164, LogFormat, Severity};
/// Configuration for the `papertrail` sink: the TCP endpoint to connect to
/// and how events are encoded into the syslog message body (json or text).
#[derive(Deserialize, Serialize, Debug)]
#[serde(deny_unknown_fields)]
pub struct PapertrailConfig {
    // Endpoint URI; both host and port are required (checked in `build`).
    endpoint: UriSerde,
    encoding: EncodingConfig<Encoding>,
}
// Register this sink type so it can be referenced as "papertrail" in config.
inventory::submit! {
    SinkDescription::new_without_default::<PapertrailConfig>("papertrail")
}
#[typetag::serde(name = "papertrail")]
impl SinkConfig for PapertrailConfig {
    /// Build the sink: a TLS-wrapped TCP sink towards the configured
    /// endpoint, with each event syslog-encoded by `encode_event`.
    fn build(&self, cx: SinkContext) -> crate::Result<(super::RouterSink, super::Healthcheck)> {
        // Both host and port must be present in the endpoint URI.
        let host = self
            .endpoint
            .host()
            .map(str::to_string)
            .ok_or_else(|| "A host is required for endpoints".to_string())?;
        let port = self
            .endpoint
            .port_u16()
            .ok_or_else(|| "A port is required for endpoints".to_string())?;
        let sink = TcpSink::new(
            host,
            port,
            cx.resolver(),
            // Always connect over TLS with default client settings.
            MaybeTlsSettings::Tls(TlsSettings::default()),
        );
        let healthcheck = sink.healthcheck();
        // The process id is baked into every syslog frame.
        let pid = std::process::id();
        let encoding = self.encoding.clone();
        let sink = sink.with_flat_map(move |e| iter_ok(encode_event(e, pid, &encoding)));
        Ok((Box::new(sink), Box::new(healthcheck)))
    }
    fn input_type(&self) -> DataType {
        DataType::Log
    }
    fn sink_type(&self) -> &'static str {
        "papertrail"
    }
}
/// Encode `event` into a newline-terminated syslog frame (via the 3164
/// formatter). Always returns `Some`; the `Option` return type exists to
/// fit `with_flat_map` in the sink builder.
fn encode_event(
    mut event: crate::Event,
    pid: u32,
    encoding: &EncodingConfig<Encoding>,
) -> Option<Bytes> {
    encoding.apply_rules(&mut event);
    // Pull the host field (if any) out of the event to use as the syslog
    // hostname; `Option::map` replaces the previous manual if-let/Some/None.
    let host = event
        .as_mut_log()
        .remove(log_schema().host_key())
        .map(|host| host.to_string_lossy());
    let formatter = Formatter3164 {
        facility: Facility::LOG_USER,
        hostname: host,
        process: "vector".into(),
        pid: pid as i32,
    };
    let mut buf: Vec<u8> = Vec::new();
    let log = event.into_log();
    // Message body: the whole log as JSON, or just the message field as text.
    let message = match encoding.codec() {
        Encoding::Json => serde_json::to_string(&log).unwrap(),
        Encoding::Text => log
            .get(&log_schema().message_key())
            .map(|v| v.to_string_lossy())
            .unwrap_or_default(),
    };
    formatter
        .format(&mut buf, Severity::LOG_INFO, message)
        .unwrap();
    buf.push(b'\n');
    Some(Bytes::from(buf))
}
| 25.943396 | 96 | 0.589455 |
7220d6e0a7e7e167c5242a1b5be00a1099a5ec94
| 18,139 |
//! This module is concerned with finding methods that a given type provides.
//! For details about how this works in rustc, see the method lookup page in the
//! [rustc guide](https://rust-lang.github.io/rustc-guide/method-lookup.html)
//! and the corresponding code mostly in librustc_typeck/check/method/probe.rs.
use std::sync::Arc;
use arrayvec::ArrayVec;
use hir_def::{
lang_item::LangItemTarget, resolver::Resolver, type_ref::Mutability, AssocItemId, AstItemDef,
FunctionId, HasModule, ImplId, Lookup, TraitId,
};
use hir_expand::name::Name;
use ra_db::CrateId;
use ra_prof::profile;
use rustc_hash::FxHashMap;
use super::Substs;
use crate::{
autoderef,
db::HirDatabase,
primitive::{FloatBitness, Uncertain},
utils::all_super_traits,
Canonical, InEnvironment, TraitEnvironment, TraitRef, Ty, TypeCtor, TypeWalk,
};
/// This is used as a key for indexing impls.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum TyFingerprint {
    // Only "applied" type constructors are fingerprintable; other types
    // (references etc.) yield no fingerprint — see `for_impl` below.
    Apply(TypeCtor),
}
impl TyFingerprint {
    /// Creates a TyFingerprint for looking up an impl. Only certain types can
    /// have impls: if we have some `struct S`, we can have an `impl S`, but not
    /// `impl &S`. Hence, this will return `None` for reference types and such.
    fn for_impl(ty: &Ty) -> Option<TyFingerprint> {
        if let Ty::Apply(a_ty) = ty {
            Some(TyFingerprint::Apply(a_ty.ctor))
        } else {
            None
        }
    }
}
/// All impls of a single crate, indexed for fast lookup.
#[derive(Debug, PartialEq, Eq)]
pub struct CrateImplBlocks {
    // Inherent impls, keyed by the fingerprint of their self type.
    impls: FxHashMap<TyFingerprint, Vec<ImplId>>,
    // Trait impls, keyed by the implemented trait.
    impls_by_trait: FxHashMap<TraitId, Vec<ImplId>>,
}
impl CrateImplBlocks {
    /// Collects all impls of `krate`: trait impls are bucketed by trait,
    /// inherent impls by the fingerprint of their self type. Inherent impls
    /// whose self type has no fingerprint are dropped.
    pub(crate) fn impls_in_crate_query(
        db: &impl HirDatabase,
        krate: CrateId,
    ) -> Arc<CrateImplBlocks> {
        let _p = profile("impls_in_crate_query");
        let mut res =
            CrateImplBlocks { impls: FxHashMap::default(), impls_by_trait: FxHashMap::default() };
        let crate_def_map = db.crate_def_map(krate);
        for (_module_id, module_data) in crate_def_map.modules.iter() {
            for &impl_id in module_data.impls.iter() {
                match db.impl_trait(impl_id) {
                    Some(tr) => {
                        res.impls_by_trait.entry(tr.trait_).or_default().push(impl_id);
                    }
                    None => {
                        let self_ty = db.impl_self_ty(impl_id);
                        if let Some(self_ty_fp) = TyFingerprint::for_impl(&self_ty) {
                            res.impls.entry(self_ty_fp).or_default().push(impl_id);
                        }
                    }
                }
            }
        }
        Arc::new(res)
    }
    /// Inherent impls whose self type has the same fingerprint as `ty`.
    pub fn lookup_impl_blocks(&self, ty: &Ty) -> impl Iterator<Item = ImplId> + '_ {
        let fingerprint = TyFingerprint::for_impl(ty);
        fingerprint.and_then(|f| self.impls.get(&f)).into_iter().flatten().copied()
    }
    /// All impls of the given trait in this crate.
    pub fn lookup_impl_blocks_for_trait(&self, tr: TraitId) -> impl Iterator<Item = ImplId> + '_ {
        self.impls_by_trait.get(&tr).into_iter().flatten().copied()
    }
    /// Every impl (inherent and trait) collected for this crate.
    pub fn all_impls<'a>(&'a self) -> impl Iterator<Item = ImplId> + 'a {
        self.impls.values().chain(self.impls_by_trait.values()).flatten().copied()
    }
}
impl Ty {
    /// Returns the crates that may contain inherent impls for this type:
    /// the defining crate for ADTs, or the crate(s) carrying the relevant
    /// lang item for built-in types. Returns `None` for types not handled
    /// here (e.g. references, type variables).
    pub fn def_crates(
        &self,
        db: &impl HirDatabase,
        cur_crate: CrateId,
    ) -> Option<ArrayVec<[CrateId; 2]>> {
        // Types like slice can have inherent impls in several crates, (core and alloc).
        // The corresponding impls are marked with lang items, so we can use them to find the required crates.
        macro_rules! lang_item_crate {
            ($($name:expr),+ $(,)?) => {{
                let mut v = ArrayVec::<[LangItemTarget; 2]>::new();
                $(
                    v.extend(db.lang_item(cur_crate, $name.into()));
                )+
                v
            }};
        }
        let lang_item_targets = match self {
            Ty::Apply(a_ty) => match a_ty.ctor {
                TypeCtor::Adt(def_id) => {
                    // ADTs can only have inherent impls in their defining crate.
                    return Some(std::iter::once(def_id.module(db).krate).collect())
                }
                TypeCtor::Bool => lang_item_crate!("bool"),
                TypeCtor::Char => lang_item_crate!("char"),
                TypeCtor::Float(Uncertain::Known(f)) => match f.bitness {
                    // There are two lang items: one in libcore (fXX) and one in libstd (fXX_runtime)
                    FloatBitness::X32 => lang_item_crate!("f32", "f32_runtime"),
                    FloatBitness::X64 => lang_item_crate!("f64", "f64_runtime"),
                },
                TypeCtor::Int(Uncertain::Known(i)) => lang_item_crate!(i.ty_to_string()),
                TypeCtor::Str => lang_item_crate!("str_alloc", "str"),
                TypeCtor::Slice => lang_item_crate!("slice_alloc", "slice"),
                TypeCtor::RawPtr(Mutability::Shared) => lang_item_crate!("const_ptr"),
                TypeCtor::RawPtr(Mutability::Mut) => lang_item_crate!("mut_ptr"),
                _ => return None,
            },
            _ => return None,
        };
        // Map each lang-item impl block back to its defining crate.
        let res = lang_item_targets
            .into_iter()
            .filter_map(|it| match it {
                LangItemTarget::ImplBlockId(it) => Some(it),
                _ => None,
            })
            .map(|it| it.module(db).krate)
            .collect();
        Some(res)
    }
}
/// Look up the method with the given name, returning the actual autoderefed
/// receiver type (but without autoref applied yet).
pub(crate) fn lookup_method(
    ty: &Canonical<Ty>,
    db: &impl HirDatabase,
    name: &Name,
    resolver: &Resolver,
) -> Option<(Ty, FunctionId)> {
    iterate_method_candidates(ty, db, resolver, Some(name), LookupMode::MethodCall, |ty, item| {
        // Only function candidates count for method-call lookup.
        if let AssocItemId::FunctionId(f) = item {
            Some((ty.clone(), f))
        } else {
            None
        }
    })
}
/// Whether we're looking up a dotted method call (like `v.len()`) or a path
/// (like `Vec::new`). The mode changes both which candidates are considered
/// and whether the receiver's deref chain is walked — see
/// `iterate_method_candidates`.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum LookupMode {
    /// Looking up a method call like `v.len()`: We only consider candidates
    /// that have a `self` parameter, and do autoderef.
    MethodCall,
    /// Looking up a path like `Vec::new` or `Vec::default`: We consider all
    /// candidates including associated constants, but don't do autoderef.
    Path,
}
// This would be nicer if it just returned an iterator, but that runs into
// lifetime problems, because we need to borrow temp `CrateImplBlocks`.
// FIXME add a context type here?
/// Iterates all assoc-item candidates for `ty` in lookup order, calling
/// `callback` for each; the first `Some` it returns is the overall result.
pub fn iterate_method_candidates<T>(
    ty: &Canonical<Ty>,
    db: &impl HirDatabase,
    resolver: &Resolver,
    name: Option<&Name>,
    mode: LookupMode,
    mut callback: impl FnMut(&Ty, AssocItemId) -> Option<T>,
) -> Option<T> {
    match mode {
        LookupMode::MethodCall => {
            // For method calls, rust first does any number of autoderef, and then one
            // autoref (i.e. when the method takes &self or &mut self). We just ignore
            // the autoref currently -- when we find a method matching the given name,
            // we assume it fits.
            // Also note that when we've got a receiver like &S, even if the method we
            // find in the end takes &self, we still do the autoderef step (just as
            // rustc does an autoderef and then autoref again).
            let environment = TraitEnvironment::lower(db, resolver);
            let ty = InEnvironment { value: ty.clone(), environment };
            let krate = resolver.krate()?;
            // We have to be careful about the order we're looking at candidates
            // in here. Consider the case where we're resolving `x.clone()`
            // where `x: &Vec<_>`. This resolves to the clone method with self
            // type `Vec<_>`, *not* `&_`. I.e. we need to consider methods where
            // the receiver type exactly matches before cases where we have to
            // do autoref. But in the autoderef steps, the `&_` self type comes
            // up *before* the `Vec<_>` self type.
            //
            // On the other hand, we don't want to just pick any by-value method
            // before any by-autoref method; it's just that we need to consider
            // the methods by autoderef order of *receiver types*, not *self
            // types*.
            let deref_chain: Vec<_> = autoderef::autoderef(db, Some(krate), ty.clone()).collect();
            for i in 0..deref_chain.len() {
                if let Some(result) = iterate_method_candidates_with_autoref(
                    &deref_chain[i..],
                    db,
                    resolver,
                    name,
                    &mut callback,
                ) {
                    return Some(result);
                }
            }
            None
        }
        LookupMode::Path => {
            // No autoderef for path lookups
            iterate_method_candidates_for_self_ty(&ty, db, resolver, name, &mut callback)
        }
    }
}
/// Tries candidates in receiver-type order for the head of the deref chain:
/// first the plain type, then `&T`, then `&mut T` — so exact receiver
/// matches beat autoref'd ones.
fn iterate_method_candidates_with_autoref<T>(
    deref_chain: &[Canonical<Ty>],
    db: &impl HirDatabase,
    resolver: &Resolver,
    name: Option<&Name>,
    mut callback: impl FnMut(&Ty, AssocItemId) -> Option<T>,
) -> Option<T> {
    // Plain receiver: the rest of its deref chain is everything after it.
    if let Some(result) = iterate_method_candidates_by_receiver(
        &deref_chain[0],
        &deref_chain[1..],
        db,
        resolver,
        name,
        &mut callback,
    ) {
        return Some(result);
    }
    // `&T` receiver: `&T` derefs to `T`, so the whole chain is its tail.
    let refed = Canonical {
        num_vars: deref_chain[0].num_vars,
        value: Ty::apply_one(TypeCtor::Ref(Mutability::Shared), deref_chain[0].value.clone()),
    };
    if let Some(result) = iterate_method_candidates_by_receiver(
        &refed,
        deref_chain,
        db,
        resolver,
        name,
        &mut callback,
    ) {
        return Some(result);
    }
    // `&mut T` receiver, same reasoning as `&T`.
    let ref_muted = Canonical {
        num_vars: deref_chain[0].num_vars,
        value: Ty::apply_one(TypeCtor::Ref(Mutability::Mut), deref_chain[0].value.clone()),
    };
    if let Some(result) = iterate_method_candidates_by_receiver(
        &ref_muted,
        deref_chain,
        db,
        resolver,
        name,
        &mut callback,
    ) {
        return Some(result);
    }
    None
}
/// Looks for candidates whose *receiver* type is exactly `receiver_ty`.
/// Inherent methods on every type of the deref chain are tried first, and
/// only then trait methods.
fn iterate_method_candidates_by_receiver<T>(
    receiver_ty: &Canonical<Ty>,
    rest_of_deref_chain: &[Canonical<Ty>],
    db: &impl HirDatabase,
    resolver: &Resolver,
    name: Option<&Name>,
    mut callback: impl FnMut(&Ty, AssocItemId) -> Option<T>,
) -> Option<T> {
    // We're looking for methods with *receiver* type receiver_ty. These could
    // be found in any of the derefs of receiver_ty, so we have to go through
    // that.
    let krate = resolver.krate()?;
    for self_ty in std::iter::once(receiver_ty).chain(rest_of_deref_chain) {
        if let Some(result) =
            iterate_inherent_methods(self_ty, db, name, Some(receiver_ty), krate, &mut callback)
        {
            return Some(result);
        }
    }
    for self_ty in std::iter::once(receiver_ty).chain(rest_of_deref_chain) {
        if let Some(result) = iterate_trait_method_candidates(
            self_ty,
            db,
            resolver,
            name,
            Some(receiver_ty),
            &mut callback,
        ) {
            return Some(result);
        }
    }
    None
}
/// Path-mode lookup on `self_ty` itself: inherent items first, then trait
/// items; no autoderef or autoref is involved.
fn iterate_method_candidates_for_self_ty<T>(
    self_ty: &Canonical<Ty>,
    db: &impl HirDatabase,
    resolver: &Resolver,
    name: Option<&Name>,
    mut callback: impl FnMut(&Ty, AssocItemId) -> Option<T>,
) -> Option<T> {
    let krate = resolver.krate()?;
    if let Some(result) = iterate_inherent_methods(self_ty, db, name, None, krate, &mut callback) {
        return Some(result);
    }
    if let Some(result) =
        iterate_trait_method_candidates(self_ty, db, resolver, name, None, &mut callback)
    {
        return Some(result);
    }
    None
}
/// Iterates candidates from traits: the inherent trait of `impl Trait` /
/// `dyn Trait`, traits implied by the environment's predicates, and traits
/// in scope — lazily confirming through the trait solver that `self_ty`
/// actually implements each trait before yielding its items.
fn iterate_trait_method_candidates<T>(
    self_ty: &Canonical<Ty>,
    db: &impl HirDatabase,
    resolver: &Resolver,
    name: Option<&Name>,
    receiver_ty: Option<&Canonical<Ty>>,
    mut callback: impl FnMut(&Ty, AssocItemId) -> Option<T>,
) -> Option<T> {
    let krate = resolver.krate()?;
    // FIXME: maybe put the trait_env behind a query (need to figure out good input parameters for that)
    let env = TraitEnvironment::lower(db, resolver);
    // if ty is `impl Trait` or `dyn Trait`, the trait doesn't need to be in scope
    let inherent_trait = self_ty.value.inherent_trait().into_iter();
    // if we have `T: Trait` in the param env, the trait doesn't need to be in scope
    let traits_from_env = env
        .trait_predicates_for_self_ty(&self_ty.value)
        .map(|tr| tr.trait_)
        .flat_map(|t| all_super_traits(db, t));
    let traits =
        inherent_trait.chain(traits_from_env).chain(resolver.traits_in_scope(db).into_iter());
    'traits: for t in traits {
        let data = db.trait_data(t);
        // we'll be lazy about checking whether the type implements the
        // trait, but if we find out it doesn't, we'll skip the rest of the
        // iteration
        let mut known_implemented = false;
        for (_name, item) in data.items.iter() {
            if !is_valid_candidate(db, name, receiver_ty, (*item).into(), self_ty) {
                continue;
            }
            if !known_implemented {
                let goal = generic_implements_goal(db, env.clone(), t, self_ty.clone());
                if db.trait_solve(krate.into(), goal).is_none() {
                    continue 'traits;
                }
            }
            known_implemented = true;
            if let Some(result) = callback(&self_ty.value, (*item).into()) {
                return Some(result);
            }
        }
    }
    None
}
/// Iterates candidates from inherent impls of `self_ty`, searching every
/// crate that may define such impls (see `Ty::def_crates`).
fn iterate_inherent_methods<T>(
    self_ty: &Canonical<Ty>,
    db: &impl HirDatabase,
    name: Option<&Name>,
    receiver_ty: Option<&Canonical<Ty>>,
    krate: CrateId,
    mut callback: impl FnMut(&Ty, AssocItemId) -> Option<T>,
) -> Option<T> {
    for krate in self_ty.value.def_crates(db, krate)? {
        let impls = db.impls_in_crate(krate);
        for impl_block in impls.lookup_impl_blocks(&self_ty.value) {
            for &item in db.impl_data(impl_block).items.iter() {
                if !is_valid_candidate(db, name, receiver_ty, item, self_ty) {
                    continue;
                }
                if let Some(result) = callback(&self_ty.value, item) {
                    return Some(result);
                }
            }
        }
    }
    None
}
/// Name/receiver filter for a candidate item: functions must match `name`
/// and — when `receiver_ty` is given — have a `self` parameter whose
/// substituted type equals the receiver; consts match on name only and
/// never take a receiver; all other item kinds are rejected.
fn is_valid_candidate(
    db: &impl HirDatabase,
    name: Option<&Name>,
    receiver_ty: Option<&Canonical<Ty>>,
    item: AssocItemId,
    self_ty: &Canonical<Ty>,
) -> bool {
    match item {
        AssocItemId::FunctionId(m) => {
            let data = db.function_data(m);
            if let Some(name) = name {
                if &data.name != name {
                    return false;
                }
            }
            if let Some(receiver_ty) = receiver_ty {
                // Method-call lookup: the candidate must take `self`, and
                // its declared receiver type must match exactly.
                if !data.has_self_param {
                    return false;
                }
                let transformed_receiver_ty = match transform_receiver_ty(db, m, self_ty) {
                    Some(ty) => ty,
                    None => return false,
                };
                if transformed_receiver_ty != receiver_ty.value {
                    return false;
                }
            }
            true
        }
        AssocItemId::ConstId(c) => {
            let data = db.const_data(c);
            name.map_or(true, |name| data.name.as_ref() == Some(name)) && receiver_ty.is_none()
        }
        _ => false,
    }
}
/// Unifies the impl's self type (instantiated with fresh bound variables)
/// against `self_ty`, returning the inferred substitutions for the impl's
/// parameters, or `None` if the types don't unify.
pub(crate) fn inherent_impl_substs(
    db: &impl HirDatabase,
    impl_id: ImplId,
    self_ty: &Canonical<Ty>,
) -> Option<Substs> {
    let vars = Substs::build_for_def(db, impl_id).fill_with_bound_vars(0).build();
    let self_ty_with_vars = db.impl_self_ty(impl_id).subst(&vars);
    let self_ty_with_vars = Canonical { num_vars: vars.len(), value: self_ty_with_vars };
    super::infer::unify(&self_ty_with_vars, self_ty)
}
/// Returns the declared type of `function_id`'s first (`self`) parameter
/// after substitution: for trait methods, `Self` is the given self type and
/// remaining args are unknown; for impl methods, the impl's substitutions
/// are inferred via `inherent_impl_substs` (returning `None` on failure).
fn transform_receiver_ty(
    db: &impl HirDatabase,
    function_id: FunctionId,
    self_ty: &Canonical<Ty>,
) -> Option<Ty> {
    let substs = match function_id.lookup(db).container {
        hir_def::ContainerId::TraitId(_) => Substs::build_for_def(db, function_id)
            .push(self_ty.value.clone())
            .fill_with_unknown()
            .build(),
        hir_def::ContainerId::ImplId(impl_id) => inherent_impl_substs(db, impl_id, &self_ty)?,
        // Free functions have no receiver and can't be assoc-item candidates.
        hir_def::ContainerId::ModuleId(_) => unreachable!(),
    };
    let sig = db.callable_item_signature(function_id.into());
    Some(sig.params()[0].clone().subst(&substs))
}
/// Does `ty` implement `trait_` in the resolver's environment? `impl Trait`
/// / `dyn Trait` short-circuit for their own inherent trait; otherwise the
/// trait solver is consulted.
pub fn implements_trait(
    ty: &Canonical<Ty>,
    db: &impl HirDatabase,
    resolver: &Resolver,
    krate: CrateId,
    trait_: TraitId,
) -> bool {
    if ty.value.inherent_trait() == Some(trait_) {
        // FIXME this is a bit of a hack, since Chalk should say the same thing
        // anyway, but currently Chalk doesn't implement `dyn/impl Trait` yet
        return true;
    }
    let env = TraitEnvironment::lower(db, resolver);
    let goal = generic_implements_goal(db, env, trait_, ty.clone());
    let solution = db.trait_solve(krate.into(), goal);
    solution.is_some()
}
/// This creates Substs for a trait with the given Self type and type variables
/// for all other parameters, to query Chalk with it.
fn generic_implements_goal(
    db: &impl HirDatabase,
    env: Arc<TraitEnvironment>,
    trait_: TraitId,
    self_ty: Canonical<Ty>,
) -> Canonical<InEnvironment<super::Obligation>> {
    let num_vars = self_ty.num_vars;
    // Fresh bound variables for the trait's non-Self parameters start after
    // the variables already present in `self_ty`.
    let substs = super::Substs::build_for_def(db, trait_)
        .push(self_ty.value)
        .fill_with_bound_vars(num_vars as u32)
        .build();
    // Total variable count: the trait's parameters minus Self, plus the
    // variables carried by the canonicalized self type.
    let num_vars = substs.len() - 1 + self_ty.num_vars;
    let trait_ref = TraitRef { trait_, substs };
    let obligation = super::Obligation::Trait(trait_ref);
    Canonical { num_vars, value: InEnvironment::new(env, obligation) }
}
| 36.133466 | 110 | 0.586967 |
5b54f196195a4dfca633238b81f39e19061e3660
| 1,931 |
use crate::app;
use ash::version::DeviceV1_0;
use ash::vk;
/// Create a single-subpass render pass with one color attachment in the
/// surface format, storing the handle in `app_data.render_pass`.
pub fn create_render_pass(
    app_data: &mut app::AppData,
    vulkan_data: &app::VulkanInitData,
) -> app::AppResult {
    let device = vulkan_data.get_device_ref();
    let mut attachment_descriptions = Vec::new();
    // One color attachment: cleared on load, stored on finish, stencil
    // ignored, and transitioned UNDEFINED -> PRESENT_SRC_KHR so it can be
    // handed to presentation after the pass.
    attachment_descriptions.push(
        vk::AttachmentDescription::builder()
            .format(vulkan_data.surface_format.format)
            .samples(vk::SampleCountFlags::TYPE_1)
            .load_op(vk::AttachmentLoadOp::CLEAR)
            .store_op(vk::AttachmentStoreOp::STORE)
            .stencil_load_op(vk::AttachmentLoadOp::DONT_CARE)
            .stencil_store_op(vk::AttachmentStoreOp::DONT_CARE)
            .initial_layout(vk::ImageLayout::UNDEFINED)
            .final_layout(vk::ImageLayout::PRESENT_SRC_KHR)
            .build(),
    );
    // Reference attachment 0 in COLOR_ATTACHMENT_OPTIMAL during the subpass.
    let col_attachment_ref = vk::AttachmentReference::builder()
        .attachment(0)
        .layout(vk::ImageLayout::COLOR_ATTACHMENT_OPTIMAL)
        .build();
    let references = [col_attachment_ref];
    // A single graphics subpass writing that color attachment.
    let mut subpass_descriptions = Vec::new();
    subpass_descriptions.push(
        vk::SubpassDescription::builder()
            .pipeline_bind_point(vk::PipelineBindPoint::GRAPHICS)
            .color_attachments(&references)
            .build(),
    );
    let create_info = vk::RenderPassCreateInfo::builder()
        .attachments(&attachment_descriptions)
        .subpasses(&subpass_descriptions);
    app_data.render_pass = match unsafe { device.create_render_pass(&create_info, None) } {
        Ok(rp) => rp,
        Err(_) => return Err(String::from("failed to create render pass")),
    };
    // Attach a debug label when the debug-utils extension is loaded.
    if let Some(debug_utils) = vulkan_data.debug_utils_loader.as_ref() {
        app::set_debug_utils_object_name(
            debug_utils,
            device.handle(),
            app_data.render_pass,
            String::from("render pass"),
        );
    }
    Ok(())
}
| 31.145161 | 91 | 0.634904 |
917c0c92a31e410e5fa0ba3c11897c75fa6ff883
| 566 |
/// Read one line from stdin and print the number of Unicode scalar values
/// (`char`s) it contains, excluding surrounding whitespace/newline.
fn main()
{
    let mut str = std::string::String::new();
    std::io::stdin().read_line( & mut str )
        .expect( "Could not read line for some reason" );

    // `chars().count()` replaces the previous manual counting loop.
    let ctr = str.trim().chars().count();

    println!( "{}", ctr );
}
/*
enum Coin
{
Penny,
Nickel,
Dime,
Quarter,
}
fn get_in_cents( coin: Coin ) -> u32
{
match coin {
Coin::Penny => 1,
Coin::Nickel => 5,
Coin::Dime => 10,
Coin::Quarter => 25,
}
}
fn main()
{
let coin = Coin::Nickel;
if let coin = Coin::Nickel {
println!( "Yo!" );
}
}
*/
| 12.042553 | 51 | 0.54417 |
69e89b206ea013658bb7b17859ac292b30873900
| 1,822 |
use std::sync::Arc;
use std::time::{Duration, Instant};
use arc_swap::ArcSwapOption;
use bytes::Bytes;
use headers::ContentType;
use ftl::*;
use crate::{
metric::{MemoryMetrics, Metrics, API_METRICS, MEMORY_METRICS},
web::encoding::{bytes_as_json, bytes_as_msgpack, Encoding, EncodingQuery},
ServerState,
};
/// Snapshot serialized to clients: latency percentiles plus the global
/// memory and API metric registries.
#[derive(Serialize)]
struct AllMetrics {
    percentiles: [u16; 3],
    memory: &'static MemoryMetrics,
    api: &'static Metrics,
}
/// Pre-serialized metrics snapshot kept in both encodings, so each request
/// only clones cheap `Bytes` handles.
struct MetricsCache {
    // Instant the snapshot was taken; used for expiry in `metrics`.
    ts: Instant,
    json: Bytes,
    msgpack: Bytes,
}
impl MetricsCache {
    /// Build a snapshot of the global metrics, eagerly serialized as both
    /// JSON and MessagePack.
    pub fn at(ts: Instant) -> Self {
        let metrics = AllMetrics {
            percentiles: API_METRICS.percentiles(),
            memory: &MEMORY_METRICS,
            api: &API_METRICS,
        };
        Self {
            ts,
            // Serializing these in-memory structs is treated as infallible.
            json: serde_json::to_vec(&metrics).unwrap().into(),
            msgpack: rmp_serde::to_vec(&metrics).unwrap().into(),
        }
    }
}
// Process-wide cached snapshot; `None` until the first request arrives.
static CACHE: ArcSwapOption<MetricsCache> = ArcSwapOption::const_empty();
/// Serve the metrics snapshot, regenerating it at most once per
/// `REFRESH_DURATION` (shorter in debug builds for easier testing).
pub fn metrics(route: Route<ServerState>) -> Response {
    #[cfg(debug_assertions)]
    const REFRESH_DURATION: Duration = Duration::from_secs(5);
    #[cfg(not(debug_assertions))]
    const REFRESH_DURATION: Duration = Duration::from_secs(60);
    let cache = CACHE.load();
    match &*cache {
        Some(cache) if route.start.duration_since(cache.ts) < REFRESH_DURATION => {
            // Fresh enough: answer in the encoding the client requested
            // (JSON unless msgpack was explicitly asked for).
            match route.query::<EncodingQuery>() {
                Some(Ok(EncodingQuery {
                    encoding: Encoding::MsgPack,
                })) => bytes_as_msgpack(cache.msgpack.clone()),
                _ => bytes_as_json(cache.json.clone()),
            }
        }
        _ => {
            // Stale or missing: rebuild, then retry. The recursive call hits
            // the fresh branch because the new snapshot's ts == route.start.
            CACHE.store(Some(Arc::new(MetricsCache::at(route.start))));
            return metrics(route);
        }
    }
}
| 26.028571 | 83 | 0.597695 |
c13302dcd581c68f0acc4ad5d81a30bcb959b786
| 537 |
tonic::include_proto!("ipd");
use std::fmt;
impl From<i32> for Action {
    /// Decode a raw protobuf enum value; unrecognized values fall back to
    /// `Action::Null` instead of failing.
    fn from(i: i32) -> Self {
        match i {
            1 => Action::Cooperate,
            2 => Action::Defect,
            _ => Action::Null,
        }
    }
}
impl fmt::Display for Action {
    /// Render the action as its canonical upper-case name.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(match self {
            Action::Null => "NULL",
            Action::Cooperate => "COOPERATE",
            Action::Defect => "DEFECT",
        })
    }
}
| 22.375 | 62 | 0.465549 |
013db59b09a0ba07b517a908a2fd28cc10a2c1e6
| 8,816 |
#[macro_use] extern crate log;
extern crate byteorder;
extern crate mydht_base;
extern crate vec_map;
extern crate mio;
#[cfg(test)]
extern crate mydht_basetest;
/*use std::sync::mpsc;
use std::sync::mpsc::{Sender};
use std::result::Result as StdResult;
use std::mem;*/
//use self::byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::io::Result as IoResult;
use mydht_base::mydhtresult::Result;
/*use std::io::Error as IoError;
use std::io::ErrorKind as IoErrorKind;
use std::io::Write;
use std::io::Read;*/
use std::time::Duration as StdDuration;
use mydht_base::transport::{
Transport,
Registerable,
Token,
Ready,
};
use std::net::SocketAddr;
//use self::mio::tcp::TcpSocket;
use self::mio::net::TcpListener;
use self::mio::net::TcpStream;
//use self::mio::tcp;
use self::mio::Token as MioToken;
use self::mio::Poll;
use self::mio::Ready as MioReady;
use self::mio::PollOpt;
//use super::Attachment;
//use std::sync::Mutex;
//use std::sync::Arc;
//use std::sync::Condvar;
//use std::sync::PoisonError;
//use std::os::unix::io::AsRawFd;
//use std::os::unix::io::FromRawFd;
use mydht_base::transport::{
SerSocketAddr,
LoopResult,
};
#[cfg(test)]
use mydht_basetest::transport::{
reg_mpsc_recv_test as reg_mpsc_recv_test_base,
reg_connect_2 as reg_connect_2_base,
reg_rw_testing,
reg_rw_corout_testing,
reg_rw_cpupool_testing,
reg_rw_threadpark_testing,
};
#[cfg(feature="with-extra-test")]
#[cfg(test)]
use mydht_base::utils::{sa4};
#[cfg(feature="with-extra-test")]
#[cfg(test)]
use std::net::Ipv4Addr;
/// TCP transport: a bound listener plus the per-socket options applied to
/// every accepted or initiated connection.
pub struct Tcp {
  // Keep-alive duration applied to each socket (`None` disables it).
  keepalive : Option<StdDuration>,
  // Bound listener used by `accept` and by the `Registerable` impl.
  listener : TcpListener,
  // When true, accept/connect also return a cloned stream so callers get
  // separate read and write halves.
  mult : bool,
}
impl Tcp {
  /// Constructor: binds a listener on `p`. `keepalive` is applied to every
  /// socket; `mult` controls whether streams are duplicated into separate
  /// read/write halves.
  pub fn new (p : &SocketAddr, keepalive : Option<StdDuration>, mult : bool) -> IoResult<Tcp> {
    let tcplistener = TcpListener::bind(p)?;
    Ok(Tcp {
      keepalive : keepalive,
      listener : tcplistener,
      mult : mult,
    })
  }
}
impl Transport<Poll> for Tcp {
  type ReadStream = TcpStream;
  type WriteStream = TcpStream;
  type Address = SerSocketAddr;
  /// Accept a pending inbound connection, applying the configured
  /// keep-alive. With `mult` set, the socket is duplicated so callers get
  /// independent read and write streams.
  fn accept(&self) -> Result<(Self::ReadStream, Option<Self::WriteStream>)> {
    let (s,ad) = self.listener.accept()?;
    debug!("Initiating socket exchange : ");
    debug!("  - From {:?}", s.local_addr());
    debug!("  - With {:?}", s.peer_addr());
    debug!("  - At {:?}", ad);
    s.set_keepalive(self.keepalive)?;
    if self.mult {
      // Duplicate the fd so reads and writes can use separate handles.
      let rs = s.try_clone()?;
      Ok((s,Some(rs)))
    } else {
      Ok((s,None))
    }
  }
  /// Open an outbound connection to `p`, applying the configured
  /// keep-alive; with `mult`, also return a cloned stream for reading.
  fn connectwith(&self, p : &SerSocketAddr) -> IoResult<(Self::WriteStream, Option<Self::ReadStream>)> {
    let s = TcpStream::connect(&p.0)?;
    s.set_keepalive(self.keepalive)?;
    // TODO set nodelay and others!!
    if self.mult {
      let rs = s.try_clone()?;
      Ok((s,Some(rs)))
    } else {
      Ok((s,None))
    }
  }
}
impl Registerable<Poll> for Tcp {
  /// Register the listener with the poll instance, edge-triggered, mapping
  /// the transport readiness to mio readiness once.
  fn register(&self, poll : &Poll, token: Token, interest: Ready) -> LoopResult<bool> {
    let ready = match interest {
      Ready::Readable => MioReady::readable(),
      Ready::Writable => MioReady::writable(),
    };
    poll.register(&self.listener, MioToken(token), ready, PollOpt::edge())?;
    Ok(true)
  }
  /// Re-register the listener, same translation as `register`.
  fn reregister(&self, poll : &Poll, token: Token, interest: Ready) -> LoopResult<bool> {
    let ready = match interest {
      Ready::Readable => MioReady::readable(),
      Ready::Writable => MioReady::writable(),
    };
    poll.reregister(&self.listener, MioToken(token), ready, PollOpt::edge())?;
    Ok(true)
  }
  /// Remove the listener from the poll instance.
  fn deregister(&self, poll : &Poll) -> LoopResult<()> {
    poll.deregister(&self.listener)?;
    Ok(())
  }
}
/*tttttttttt
#[cfg(feature="with-extra-test")]
#[test]
fn connect_rw () {
let start_port = 40000;
let a1 = SerSocketAddr(sa4(Ipv4Addr::new(127,0,0,1), start_port));
let a2 = SerSocketAddr(sa4(Ipv4Addr::new(127,0,0,1), start_port+1));
let tcp_transport_1 : Tcp = Tcp::new (&a1, Some(StdDuration::from_secs(5)), true).unwrap();
let tcp_transport_2 : Tcp = Tcp::new (&a2, Some(StdDuration::from_secs(5)), true).unwrap();
// TODO test with spawn rewrite test without start but
connect_rw_with_optional(tcp_transport_1,tcp_transport_2,&a1,&a2,true,true);
}*/
// Registration + mpsc receive smoke test via the shared base-test harness.
#[test]
fn reg_mpsc_recv_test() {
  let start_port = 40010;
  let a1 = SerSocketAddr(sa4(Ipv4Addr::new(127,0,0,1), start_port));
  let t = Tcp::new (&a1, Some(StdDuration::from_secs(5)), true).unwrap();
  reg_mpsc_recv_test_base(t);
}
// Two transports connecting to a first one (base-test scenario).
#[test]
fn reg_connect_2() {
  let start_port = 40020;
  let a0 = SerSocketAddr(sa4(Ipv4Addr::new(127,0,0,1), start_port));
  let t0 = Tcp::new (&a0, Some(StdDuration::from_secs(5)), true).unwrap();
  let a1 = SerSocketAddr(sa4(Ipv4Addr::new(127,0,0,1), start_port+1));
  let t1 = Tcp::new (&a1, Some(StdDuration::from_secs(5)), true).unwrap();
  let a2 = SerSocketAddr(sa4(Ipv4Addr::new(127,0,0,1), start_port+2));
  let t2 = Tcp::new (&a2, Some(StdDuration::from_secs(5)), true).unwrap();
  reg_connect_2_base(&a0,t0,t1,t2);
}
// Read/write state-machine tests over varying content and buffer sizes.
#[test]
fn reg_rw_state1() {
  reg_rw_state(40030,120,120,120,2);
}
#[test]
fn reg_rw_state2() {
  reg_rw_state(40040,240,120,120,2);
}
#[test]
fn reg_rw_state3() {
  reg_rw_state(40050,240,250,50,2);
}
#[test]
fn reg_rw_state4() {
  reg_rw_state(40060,240,50,250,2);
}
// Shared driver; each test uses a distinct port range so tests can run in
// parallel without address clashes.
#[cfg(test)]
fn reg_rw_state(start_port : u16, content_size : usize, read_buf : usize, write_buf : usize, nbmess : usize ) {
  let a0 = SerSocketAddr(sa4(Ipv4Addr::new(127,0,0,1), start_port));
  let t0 = Tcp::new (&a0, Some(StdDuration::from_secs(5)), true).unwrap();
  let a1 = SerSocketAddr(sa4(Ipv4Addr::new(127,0,0,1), start_port+1));
  let t1 = Tcp::new (&a1, Some(StdDuration::from_secs(5)), true).unwrap();
  // content, read buf size , write buf size, and nb send
  reg_rw_testing(a0,t0,a1,t1,content_size,read_buf,write_buf,nbmess);
}
// Coroutine-based read/write driver; same shape as `reg_rw_state` but
// exercising the coroutine test harness.
#[cfg(test)]
fn reg_rw_corout(start_port : u16, content_size : usize, read_buf : usize, write_buf : usize, nbmess : usize ) {
  let a0 = SerSocketAddr(sa4(Ipv4Addr::new(127,0,0,1), start_port));
  let t0 = Tcp::new (&a0, Some(StdDuration::from_secs(5)), true).unwrap();
  let a1 = SerSocketAddr(sa4(Ipv4Addr::new(127,0,0,1), start_port+1));
  let t1 = Tcp::new (&a1, Some(StdDuration::from_secs(5)), true).unwrap();
  // content, read buf size , write buf size, and nb send
  reg_rw_corout_testing(a0,t0,a1,t1,content_size,read_buf,write_buf,nbmess);
}
#[test]
fn reg_rw_corout1() {
  reg_rw_corout(40070,120,120,120,2);
}
#[test]
fn reg_rw_corout2() {
  reg_rw_corout(40080,240,120,120,2);
}
#[test]
fn reg_rw_corout3() {
  reg_rw_corout(40090,240,250,50,3);
}
#[test]
fn reg_rw_corout4() {
  reg_rw_corout(40100,240,50,250,2);
}
// CPU-pool-based driver; extra `poolsize` parameter sets the pool size.
#[cfg(test)]
fn reg_rw_cpupool(start_port : u16, content_size : usize, read_buf : usize, write_buf : usize, nbmess : usize, poolsize : usize ) {
  let a0 = SerSocketAddr(sa4(Ipv4Addr::new(127,0,0,1), start_port));
  let t0 = Tcp::new (&a0, Some(StdDuration::from_secs(5)), true).unwrap();
  let a1 = SerSocketAddr(sa4(Ipv4Addr::new(127,0,0,1), start_port+1));
  let t1 = Tcp::new (&a1, Some(StdDuration::from_secs(5)), true).unwrap();
  // content, read buf size , write buf size, and nb send
  reg_rw_cpupool_testing(a0,t0,a1,t1,content_size,read_buf,write_buf,nbmess,poolsize);
}
#[test]
fn reg_rw_cpupool1() {
  reg_rw_cpupool(40110,120,120,120,2,2);
}
#[test]
fn reg_rw_cpupool2() {
  reg_rw_cpupool(40120,240,120,120,2,2);
}
#[test]
fn reg_rw_cpupool3() {
  reg_rw_cpupool(40130,240,250,50,20,2);
}
#[test]
fn reg_rw_cpupool4() {
  reg_rw_cpupool(40140,240,50,250,2,3);
}
// Thread-park-based driver variant.
#[cfg(test)]
fn reg_rw_threadpark(start_port : u16, content_size : usize, read_buf : usize, write_buf : usize, nbmess : usize ) {
  let a0 = SerSocketAddr(sa4(Ipv4Addr::new(127,0,0,1), start_port));
  let t0 = Tcp::new (&a0, Some(StdDuration::from_secs(5)), true).unwrap();
  let a1 = SerSocketAddr(sa4(Ipv4Addr::new(127,0,0,1), start_port+1));
  let t1 = Tcp::new (&a1, Some(StdDuration::from_secs(5)), true).unwrap();
  // content, read buf size , write buf size, and nb send
  reg_rw_threadpark_testing(a0,t0,a1,t1,content_size,read_buf,write_buf,nbmess);
}
#[test]
fn reg_rw_threadpark1() {
  reg_rw_threadpark(40210,120,120,120,2);
}
#[test]
fn reg_rw_threadpark2() {
  reg_rw_threadpark(40220,240,120,120,2);
}
#[test]
fn reg_rw_threadpark3() {
  reg_rw_threadpark(40230,240,250,50,20);
}
#[test]
fn reg_rw_threadpark4() {
  reg_rw_threadpark(40240,240,50,250,2);
}
| 28.623377 | 131 | 0.670485 |
dd73c8ab100fa0f2ca57e77447f59e04dfbf9b94
| 214,668 |
#![doc = "generated by AutoRust 0.1.0"]
#![allow(unused_mut)]
#![allow(unused_variables)]
#![allow(unused_imports)]
use crate::models::*;
pub mod autoscale_settings {
use crate::models::*;
    // NOTE: this file is AutoRust-generated (see the crate doc attribute);
    // hand edits will be lost on regeneration.
    /// GET all autoscale settings in a resource group; deserializes the 200
    /// body, mapping any other status to `Error::DefaultResponse`.
    pub async fn list_by_resource_group(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<AutoscaleSettingResourceCollection, list_by_resource_group::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.Insights/autoscalesettings",
            operation_config.base_path(),
            subscription_id,
            resource_group_name
        );
        let mut url = url::Url::parse(url_str).map_err(list_by_resource_group::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Bearer-token auth is only attached when a credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list_by_resource_group::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder
            .body(req_body)
            .map_err(list_by_resource_group::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(list_by_resource_group::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: AutoscaleSettingResourceCollection = serde_json::from_slice(rsp_body)
                    .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                // Non-200: try to parse the service's error payload.
                let rsp_body = rsp.body();
                let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
                    .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(list_by_resource_group::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error type returned by [`list_by_resource_group`].
    pub mod list_by_resource_group {
        use crate::{models, models::*};
        /// `DefaultResponse` carries the error payload parsed from a non-200
        /// service reply; the remaining variants are client-side failures while
        /// building, authenticating, sending, or decoding the request/response.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Fetches a single autoscale setting by name.
    ///
    /// Sends `GET {base_path}/subscriptions/{subscription_id}/resourcegroups/{resource_group_name}/providers/Microsoft.Insights/autoscalesettings/{autoscale_setting_name}`
    /// with the configured `api-version`. HTTP 200 is deserialized into
    /// [`AutoscaleSettingResource`]; any other status is parsed as
    /// `ErrorResponse` and returned via `Error::DefaultResponse`.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        autoscale_setting_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<AutoscaleSettingResource, get::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.Insights/autoscalesettings/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            autoscale_setting_name
        );
        let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Bearer auth is attached only when a credential was configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(get::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // GET carries no payload.
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: AutoscaleSettingResource =
                    serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                // Non-200: the body is expected to be a service `ErrorResponse`.
                let rsp_body = rsp.body();
                let rsp_value: ErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(get::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error type returned by [`get`].
    pub mod get {
        use crate::{models, models::*};
        /// `DefaultResponse` carries the error payload parsed from a non-200
        /// service reply; the remaining variants are client-side failures while
        /// building, authenticating, sending, or decoding the request/response.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Creates or replaces an autoscale setting.
    ///
    /// Sends `PUT {base_path}/subscriptions/{subscription_id}/resourcegroups/{resource_group_name}/providers/Microsoft.Insights/autoscalesettings/{autoscale_setting_name}`
    /// with `parameters` serialized as the JSON body. HTTP 200 and 201 both
    /// succeed and carry the resulting resource (`Response::Ok200` /
    /// `Response::Created201`); any other status is parsed as `ErrorResponse`
    /// and returned via `Error::DefaultResponse`.
    pub async fn create_or_update(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        autoscale_setting_name: &str,
        parameters: &AutoscaleSettingResource,
        subscription_id: &str,
    ) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.Insights/autoscalesettings/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            autoscale_setting_name
        );
        let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PUT);
        // Bearer auth is attached only when a credential was configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(create_or_update::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // JSON payload: the full autoscale setting resource.
        req_builder = req_builder.header("content-type", "application/json");
        let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(create_or_update::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: AutoscaleSettingResource = serde_json::from_slice(rsp_body)
                    .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(create_or_update::Response::Ok200(rsp_value))
            }
            http::StatusCode::CREATED => {
                let rsp_body = rsp.body();
                let rsp_value: AutoscaleSettingResource = serde_json::from_slice(rsp_body)
                    .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(create_or_update::Response::Created201(rsp_value))
            }
            status_code => {
                // Non-2xx: the body is expected to be a service `ErrorResponse`.
                let rsp_body = rsp.body();
                let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
                    .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(create_or_update::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Response and error types returned by [`create_or_update`].
    pub mod create_or_update {
        use crate::{models, models::*};
        /// Distinguishes HTTP 200 (replaced) from 201 (created); both carry the
        /// resulting resource.
        #[derive(Debug)]
        pub enum Response {
            Ok200(AutoscaleSettingResource),
            Created201(AutoscaleSettingResource),
        }
        /// `DefaultResponse` carries the error payload parsed from a non-2xx
        /// service reply; the remaining variants are client-side failures while
        /// building, authenticating, sending, or decoding the request/response.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Patches an existing autoscale setting.
    ///
    /// Sends `PATCH {base_path}/subscriptions/{subscription_id}/resourcegroups/{resource_group_name}/providers/Microsoft.Insights/autoscalesettings/{autoscale_setting_name}`
    /// with `autoscale_setting_resource` serialized as the JSON body.
    /// HTTP 200 is deserialized into [`AutoscaleSettingResource`]; any other
    /// status is parsed as `ErrorResponse` and returned via
    /// `Error::DefaultResponse`.
    pub async fn update(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        autoscale_setting_name: &str,
        autoscale_setting_resource: &AutoscaleSettingResourcePatch,
    ) -> std::result::Result<AutoscaleSettingResource, update::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.Insights/autoscalesettings/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            autoscale_setting_name
        );
        let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PATCH);
        // Bearer auth is attached only when a credential was configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(update::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // JSON payload: the patch document.
        req_builder = req_builder.header("content-type", "application/json");
        let req_body = azure_core::to_json(autoscale_setting_resource).map_err(update::Error::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: AutoscaleSettingResource =
                    serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                // Non-200: the body is expected to be a service `ErrorResponse`.
                let rsp_body = rsp.body();
                let rsp_value: ErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(update::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error type returned by [`update`].
    pub mod update {
        use crate::{models, models::*};
        /// `DefaultResponse` carries the error payload parsed from a non-200
        /// service reply; the remaining variants are client-side failures while
        /// building, authenticating, sending, or decoding the request/response.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn delete(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
autoscale_setting_name: &str,
subscription_id: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.Insights/autoscalesettings/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
autoscale_setting_name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(delete::Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
Err(delete::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
    /// Response and error types returned by [`delete`].
    pub mod delete {
        use crate::{models, models::*};
        /// Both variants are success statuses; neither carries a payload.
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            NoContent204,
        }
        /// `DefaultResponse` carries the error payload parsed from a non-2xx
        /// service reply; the remaining variants are client-side failures while
        /// building, authenticating, sending, or decoding the request/response.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Lists all autoscale settings in a subscription.
    ///
    /// Sends `GET {base_path}/subscriptions/{subscription_id}/providers/Microsoft.Insights/autoscalesettings`
    /// with the configured `api-version`. HTTP 200 is deserialized into
    /// [`AutoscaleSettingResourceCollection`]; any other status is parsed as
    /// `ErrorResponse` and returned via `Error::DefaultResponse`.
    pub async fn list_by_subscription(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
    ) -> std::result::Result<AutoscaleSettingResourceCollection, list_by_subscription::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/providers/Microsoft.Insights/autoscalesettings",
            operation_config.base_path(),
            subscription_id
        );
        let mut url = url::Url::parse(url_str).map_err(list_by_subscription::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Bearer auth is attached only when a credential was configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list_by_subscription::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // GET carries no payload.
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list_by_subscription::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(list_by_subscription::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: AutoscaleSettingResourceCollection = serde_json::from_slice(rsp_body)
                    .map_err(|source| list_by_subscription::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                // Non-200: the body is expected to be a service `ErrorResponse`.
                let rsp_body = rsp.body();
                let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
                    .map_err(|source| list_by_subscription::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(list_by_subscription::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error type returned by [`list_by_subscription`].
    pub mod list_by_subscription {
        use crate::{models, models::*};
        /// `DefaultResponse` carries the error payload parsed from a non-200
        /// service reply; the remaining variants are client-side failures while
        /// building, authenticating, sending, or decoding the request/response.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
pub mod operations {
use crate::models::*;
pub async fn list(operation_config: &crate::OperationConfig) -> std::result::Result<OperationListResult, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!("{}/providers/Microsoft.Insights/operations", operation_config.base_path(),);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: OperationListResult =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    /// Error type returned by [`list`].
    pub mod list {
        use crate::{models, models::*};
        /// `UnexpectedResponse` carries the raw body of any non-200 reply
        /// (no typed error schema for this operation); the remaining variants
        /// are client-side failures while building, authenticating, sending,
        /// or decoding the request/response.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
pub mod alert_rule_incidents {
use crate::models::*;
    /// Fetches a single incident raised by a classic alert rule.
    ///
    /// Sends `GET {base_path}/subscriptions/{subscription_id}/resourcegroups/{resource_group_name}/providers/microsoft.insights/alertrules/{rule_name}/incidents/{incident_name}`
    /// with the configured `api-version`. HTTP 200 is deserialized into
    /// [`Incident`]; any other status is parsed as `ErrorResponse` and
    /// returned via `Error::DefaultResponse`.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        rule_name: &str,
        incident_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<Incident, get::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourcegroups/{}/providers/microsoft.insights/alertrules/{}/incidents/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            rule_name,
            incident_name
        );
        let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Bearer auth is attached only when a credential was configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(get::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // GET carries no payload.
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: Incident =
                    serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                // Non-200: the body is expected to be a service `ErrorResponse`.
                let rsp_body = rsp.body();
                let rsp_value: ErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(get::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error type returned by [`get`].
    pub mod get {
        use crate::{models, models::*};
        /// `DefaultResponse` carries the error payload parsed from a non-200
        /// service reply; the remaining variants are client-side failures while
        /// building, authenticating, sending, or decoding the request/response.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Lists the incidents associated with a classic alert rule.
    ///
    /// Sends `GET {base_path}/subscriptions/{subscription_id}/resourcegroups/{resource_group_name}/providers/microsoft.insights/alertrules/{rule_name}/incidents`
    /// with the configured `api-version`. HTTP 200 is deserialized into
    /// [`IncidentListResult`]; any other status is returned as
    /// `Error::UnexpectedResponse` carrying the raw response body.
    pub async fn list_by_alert_rule(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        rule_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<IncidentListResult, list_by_alert_rule::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourcegroups/{}/providers/microsoft.insights/alertrules/{}/incidents",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            rule_name
        );
        let mut url = url::Url::parse(url_str).map_err(list_by_alert_rule::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Bearer auth is attached only when a credential was configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list_by_alert_rule::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // GET carries no payload.
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list_by_alert_rule::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(list_by_alert_rule::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: IncidentListResult = serde_json::from_slice(rsp_body)
                    .map_err(|source| list_by_alert_rule::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                // No typed error schema for this operation: hand back the raw bytes.
                let rsp_body = rsp.body();
                Err(list_by_alert_rule::Error::UnexpectedResponse {
                    status_code,
                    body: rsp_body.clone(),
                })
            }
        }
    }
    /// Error type returned by [`list_by_alert_rule`].
    pub mod list_by_alert_rule {
        use crate::{models, models::*};
        /// `UnexpectedResponse` carries the raw body of any non-200 reply
        /// (no typed error schema for this operation); the remaining variants
        /// are client-side failures while building, authenticating, sending,
        /// or decoding the request/response.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
pub mod alert_rules {
use crate::models::*;
    /// Fetches a single classic alert rule by name.
    ///
    /// Sends `GET {base_path}/subscriptions/{subscription_id}/resourcegroups/{resource_group_name}/providers/Microsoft.Insights/alertrules/{rule_name}`
    /// with the configured `api-version`. HTTP 200 is deserialized into
    /// [`AlertRuleResource`]; any other status is parsed as `ErrorResponse`
    /// and returned via `Error::DefaultResponse`.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        rule_name: &str,
        subscription_id: &str,
    ) -> std::result::Result<AlertRuleResource, get::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.Insights/alertrules/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            rule_name
        );
        let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Bearer auth is attached only when a credential was configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(get::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // GET carries no payload.
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: AlertRuleResource =
                    serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                // Non-200: the body is expected to be a service `ErrorResponse`.
                let rsp_body = rsp.body();
                let rsp_value: ErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(get::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error type returned by [`get`].
    pub mod get {
        use crate::{models, models::*};
        /// `DefaultResponse` carries the error payload parsed from a non-200
        /// service reply; the remaining variants are client-side failures while
        /// building, authenticating, sending, or decoding the request/response.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Creates or replaces a classic alert rule.
    ///
    /// Sends `PUT {base_path}/subscriptions/{subscription_id}/resourcegroups/{resource_group_name}/providers/Microsoft.Insights/alertrules/{rule_name}`
    /// with `parameters` serialized as the JSON body. HTTP 200 and 201 both
    /// succeed and carry the resulting resource (`Response::Ok200` /
    /// `Response::Created201`); any other status is parsed as `ErrorResponse`
    /// and returned via `Error::DefaultResponse`.
    pub async fn create_or_update(
        operation_config: &crate::OperationConfig,
        resource_group_name: &str,
        rule_name: &str,
        parameters: &AlertRuleResource,
        subscription_id: &str,
    ) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.Insights/alertrules/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            rule_name
        );
        let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PUT);
        // Bearer auth is attached only when a credential was configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(create_or_update::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // JSON payload: the full alert rule resource.
        req_builder = req_builder.header("content-type", "application/json");
        let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?;
        req_builder = req_builder.uri(url.as_str())
        let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(create_or_update::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: AlertRuleResource = serde_json::from_slice(rsp_body)
                    .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(create_or_update::Response::Ok200(rsp_value))
            }
            http::StatusCode::CREATED => {
                let rsp_body = rsp.body();
                let rsp_value: AlertRuleResource = serde_json::from_slice(rsp_body)
                    .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(create_or_update::Response::Created201(rsp_value))
            }
            status_code => {
                // Non-2xx: the body is expected to be a service `ErrorResponse`.
                let rsp_body = rsp.body();
                let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
                    .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(create_or_update::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Response and error types returned by [`create_or_update`].
    pub mod create_or_update {
        use crate::{models, models::*};
        /// Distinguishes HTTP 200 (replaced) from 201 (created); both carry the
        /// resulting resource.
        #[derive(Debug)]
        pub enum Response {
            Ok200(AlertRuleResource),
            Created201(AlertRuleResource),
        }
        /// `DefaultResponse` carries the error payload parsed from a non-2xx
        /// service reply; the remaining variants are client-side failures while
        /// building, authenticating, sending, or decoding the request/response.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
    /// Patches an existing classic alert rule.
    ///
    /// Sends `PATCH {base_path}/subscriptions/{subscription_id}/resourcegroups/{resource_group_name}/providers/Microsoft.Insights/alertrules/{rule_name}`
    /// with `alert_rules_resource` serialized as the JSON body. HTTP 200 and
    /// 201 both succeed and carry the resulting resource (`Response::Ok200` /
    /// `Response::Created201`); any other status is parsed as `ErrorResponse`
    /// and returned via `Error::DefaultResponse`.
    pub async fn update(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        rule_name: &str,
        alert_rules_resource: &AlertRuleResourcePatch,
    ) -> std::result::Result<update::Response, update::Error> {
        let http_client = operation_config.http_client()
        let url_str = &format!(
            "{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.Insights/alertrules/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            rule_name
        );
        let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::PATCH);
        // Bearer auth is attached only when a credential was configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(update::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // JSON payload: the patch document.
        req_builder = req_builder.header("content-type", "application/json");
        let req_body = azure_core::to_json(alert_rules_resource).map_err(update::Error::SerializeError)?;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: AlertRuleResource =
                    serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(update::Response::Ok200(rsp_value))
            }
            http::StatusCode::CREATED => {
                let rsp_body = rsp.body();
                let rsp_value: AlertRuleResource =
                    serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(update::Response::Created201(rsp_value))
            }
            status_code => {
                // Non-2xx: the body is expected to be a service `ErrorResponse`.
                let rsp_body = rsp.body();
                let rsp_value: ErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(update::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Response and error types returned by [`update`].
    pub mod update {
        use crate::{models, models::*};
        /// Distinguishes HTTP 200 from 201; both carry the resulting resource.
        #[derive(Debug)]
        pub enum Response {
            Ok200(AlertRuleResource),
            Created201(AlertRuleResource),
        }
        /// `DefaultResponse` carries the error payload parsed from a non-2xx
        /// service reply; the remaining variants are client-side failures while
        /// building, authenticating, sending, or decoding the request/response.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn delete(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
rule_name: &str,
subscription_id: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.Insights/alertrules/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
rule_name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
http::StatusCode::OK => Ok(delete::Response::Ok200),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
Err(delete::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
    pub mod delete {
        //! Typed response and error surface for the sibling `delete` operation.
        use crate::{models, models::*};
        /// Successful outcomes: 204 (nothing to delete / already gone) or 200 (deleted).
        #[derive(Debug)]
        pub enum Response {
            NoContent204,
            Ok200,
        }
        /// Everything that can fail while building, sending, or decoding the request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// Non-success status with a parseable service error body.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn list_by_resource_group(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
subscription_id: &str,
) -> std::result::Result<AlertRuleResourceCollection, list_by_resource_group::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.Insights/alertrules",
operation_config.base_path(),
subscription_id,
resource_group_name
);
let mut url = url::Url::parse(url_str).map_err(list_by_resource_group::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_by_resource_group::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(list_by_resource_group::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_by_resource_group::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: AlertRuleResourceCollection = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list_by_resource_group::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
    pub mod list_by_resource_group {
        //! Error surface for the sibling `list_by_resource_group` operation.
        use crate::{models, models::*};
        /// Everything that can fail while building, sending, or decoding the request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// Non-success status with a parseable service error body.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn list_by_subscription(
operation_config: &crate::OperationConfig,
subscription_id: &str,
) -> std::result::Result<AlertRuleResourceCollection, list_by_subscription::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Insights/alertrules",
operation_config.base_path(),
subscription_id
);
let mut url = url::Url::parse(url_str).map_err(list_by_subscription::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_by_subscription::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_by_subscription::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_by_subscription::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: AlertRuleResourceCollection = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_subscription::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_subscription::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list_by_subscription::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
    pub mod list_by_subscription {
        //! Error surface for the sibling `list_by_subscription` operation.
        use crate::{models, models::*};
        /// Everything that can fail while building, sending, or decoding the request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// Non-success status with a parseable service error body.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
pub mod log_profiles {
use crate::models::*;
pub async fn get(
operation_config: &crate::OperationConfig,
log_profile_name: &str,
subscription_id: &str,
) -> std::result::Result<LogProfileResource, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Insights/logprofiles/{}",
operation_config.base_path(),
subscription_id,
log_profile_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: LogProfileResource =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
    pub mod get {
        //! Error surface for the sibling `get` operation.
        use crate::{models, models::*};
        /// Everything that can fail while building, sending, or decoding the request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// Non-success status with a parseable service error body.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
log_profile_name: &str,
parameters: &LogProfileResource,
subscription_id: &str,
) -> std::result::Result<LogProfileResource, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Insights/logprofiles/{}",
operation_config.base_path(),
subscription_id,
log_profile_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: LogProfileResource = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(create_or_update::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    pub mod create_or_update {
        //! Error surface for the sibling `create_or_update` operation.
        use crate::{models, models::*};
        /// Everything that can fail while building, sending, or decoding the request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// Any non-200 status; the raw body is kept because this operation has
            /// no typed default error model.
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
log_profile_name: &str,
log_profiles_resource: &LogProfileResourcePatch,
) -> std::result::Result<LogProfileResource, update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Insights/logprofiles/{}",
operation_config.base_path(),
subscription_id,
log_profile_name
);
let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(log_profiles_resource).map_err(update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: LogProfileResource =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
    pub mod update {
        //! Error surface for the sibling `update` operation.
        use crate::{models, models::*};
        /// Everything that can fail while building, sending, or decoding the request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// Non-success status with a parseable service error body.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn delete(
operation_config: &crate::OperationConfig,
log_profile_name: &str,
subscription_id: &str,
) -> std::result::Result<(), delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Insights/logprofiles/{}",
operation_config.base_path(),
subscription_id,
log_profile_name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(()),
status_code => {
let rsp_body = rsp.body();
Err(delete::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    pub mod delete {
        //! Error surface for the sibling `delete` operation.
        use crate::{models, models::*};
        /// Everything that can fail while building, sending, or decoding the request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// Any non-200 status; the raw body is kept because this operation has
            /// no typed default error model.
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
) -> std::result::Result<LogProfileCollection, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Insights/logprofiles",
operation_config.base_path(),
subscription_id
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: LogProfileCollection =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
Err(list::Error::UnexpectedResponse {
status_code,
body: rsp_body.clone(),
})
}
}
}
    pub mod list {
        //! Error surface for the sibling `list` operation.
        use crate::{models, models::*};
        /// Everything that can fail while building, sending, or decoding the request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// Any non-200 status; the raw body is kept because this operation has
            /// no typed default error model.
            #[error("Unexpected HTTP status code {}", status_code)]
            UnexpectedResponse { status_code: http::StatusCode, body: bytes::Bytes },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
pub mod diagnostic_settings {
use crate::models::*;
pub async fn get(
operation_config: &crate::OperationConfig,
resource_uri: &str,
name: &str,
) -> std::result::Result<DiagnosticSettingsResource, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/{}/providers/Microsoft.Insights/diagnosticSettings/{}",
operation_config.base_path(),
resource_uri,
name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: DiagnosticSettingsResource =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
    pub mod get {
        //! Error surface for the sibling `get` operation.
        use crate::{models, models::*};
        /// Everything that can fail while building, sending, or decoding the request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// Non-success status with a parseable service error body.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
resource_uri: &str,
parameters: &DiagnosticSettingsResource,
name: &str,
) -> std::result::Result<DiagnosticSettingsResource, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/{}/providers/Microsoft.Insights/diagnosticSettings/{}",
operation_config.base_path(),
resource_uri,
name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: DiagnosticSettingsResource = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(create_or_update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
    pub mod create_or_update {
        //! Error surface for the sibling `create_or_update` operation.
        use crate::{models, models::*};
        /// Everything that can fail while building, sending, or decoding the request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// Non-success status with a parseable service error body.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn delete(
operation_config: &crate::OperationConfig,
resource_uri: &str,
name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/{}/providers/Microsoft.Insights/diagnosticSettings/{}",
operation_config.base_path(),
resource_uri,
name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(delete::Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
Err(delete::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
    pub mod delete {
        //! Typed response and error surface for the sibling `delete` operation.
        use crate::{models, models::*};
        /// Successful outcomes: 200 (deleted) or 204 (nothing to delete).
        #[derive(Debug)]
        pub enum Response {
            Ok200,
            NoContent204,
        }
        /// Everything that can fail while building, sending, or decoding the request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// Non-success status with a parseable service error body.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn list(
operation_config: &crate::OperationConfig,
resource_uri: &str,
) -> std::result::Result<DiagnosticSettingsResourceCollection, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/{}/providers/Microsoft.Insights/diagnosticSettings",
operation_config.base_path(),
resource_uri
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: DiagnosticSettingsResourceCollection =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
    pub mod list {
        //! Error surface for the sibling `list` operation.
        use crate::{models, models::*};
        /// Everything that can fail while building, sending, or decoding the request.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            /// Non-success status with a parseable service error body.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
pub mod diagnostic_settings_category {
use crate::models::*;
/// `GET {base_path}/{resource_uri}/providers/Microsoft.Insights/diagnosticSettingsCategories/{name}`.
///
/// Deserializes a `200 OK` body into [`DiagnosticSettingsCategoryResource`];
/// any other status is parsed as an `ErrorResponse` and returned as
/// [`get::Error::DefaultResponse`].
pub async fn get(
    operation_config: &crate::OperationConfig,
    resource_uri: &str,
    name: &str,
) -> std::result::Result<DiagnosticSettingsCategoryResource, get::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!(
        "{}/{}/providers/Microsoft.Insights/diagnosticSettingsCategories/{}",
        operation_config.base_path(),
        resource_uri,
        name
    );
    let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
    let mut req_builder = http::request::Builder::new();
    req_builder = req_builder.method(http::Method::GET);
    // A bearer token is attached only when a credential was configured.
    if let Some(token_credential) = operation_config.token_credential() {
        let token_response = token_credential
            .get_token(operation_config.token_credential_resource())
            .await
            .map_err(get::Error::GetTokenError)?;
        req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
    let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
    req_builder = req_builder.uri(url.as_str());
    let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
    let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
    match rsp.status() {
        http::StatusCode::OK => {
            let rsp_body = rsp.body();
            let rsp_value: DiagnosticSettingsCategoryResource =
                serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
            Ok(rsp_value)
        }
        // Any non-200 status is reported with the parsed error body.
        status_code => {
            let rsp_body = rsp.body();
            let rsp_value: ErrorResponse =
                serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
            Err(get::Error::DefaultResponse {
                status_code,
                value: rsp_value,
            })
        }
    }
}
/// Error type for the sibling [`get`] operation.
pub mod get {
    use crate::{models, models::*};
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::ErrorResponse,
        },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
/// `GET {base_path}/{resource_uri}/providers/Microsoft.Insights/diagnosticSettingsCategories`.
///
/// Deserializes a `200 OK` body into [`DiagnosticSettingsCategoryResourceCollection`];
/// any other status becomes [`list::Error::DefaultResponse`].
pub async fn list(
    operation_config: &crate::OperationConfig,
    resource_uri: &str,
) -> std::result::Result<DiagnosticSettingsCategoryResourceCollection, list::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!(
        "{}/{}/providers/Microsoft.Insights/diagnosticSettingsCategories",
        operation_config.base_path(),
        resource_uri
    );
    let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
    let mut req_builder = http::request::Builder::new();
    req_builder = req_builder.method(http::Method::GET);
    // A bearer token is attached only when a credential was configured.
    if let Some(token_credential) = operation_config.token_credential() {
        let token_response = token_credential
            .get_token(operation_config.token_credential_resource())
            .await
            .map_err(list::Error::GetTokenError)?;
        req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
    let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
    req_builder = req_builder.uri(url.as_str());
    let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
    let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
    match rsp.status() {
        http::StatusCode::OK => {
            let rsp_body = rsp.body();
            let rsp_value: DiagnosticSettingsCategoryResourceCollection =
                serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
            Ok(rsp_value)
        }
        status_code => {
            let rsp_body = rsp.body();
            let rsp_value: ErrorResponse =
                serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
            Err(list::Error::DefaultResponse {
                status_code,
                value: rsp_value,
            })
        }
    }
}
/// Error type for the sibling [`list`] operation.
pub mod list {
    use crate::{models, models::*};
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::ErrorResponse,
        },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
}
pub mod action_groups {
use crate::models::*;
/// `GET {base_path}/subscriptions/{sub}/resourceGroups/{rg}/providers/microsoft.insights/actionGroups/{name}`.
///
/// Deserializes a `200 OK` body into [`ActionGroupResource`]; any other status
/// becomes [`get::Error::DefaultResponse`] with the parsed error body.
pub async fn get(
    operation_config: &crate::OperationConfig,
    resource_group_name: &str,
    action_group_name: &str,
    subscription_id: &str,
) -> std::result::Result<ActionGroupResource, get::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/microsoft.insights/actionGroups/{}",
        operation_config.base_path(),
        subscription_id,
        resource_group_name,
        action_group_name
    );
    let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
    let mut req_builder = http::request::Builder::new();
    req_builder = req_builder.method(http::Method::GET);
    // A bearer token is attached only when a credential was configured.
    if let Some(token_credential) = operation_config.token_credential() {
        let token_response = token_credential
            .get_token(operation_config.token_credential_resource())
            .await
            .map_err(get::Error::GetTokenError)?;
        req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
    let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
    req_builder = req_builder.uri(url.as_str())
    let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
    let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
    match rsp.status() {
        http::StatusCode::OK => {
            let rsp_body = rsp.body();
            let rsp_value: ActionGroupResource =
                serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
            Ok(rsp_value)
        }
        status_code => {
            let rsp_body = rsp.body();
            let rsp_value: ErrorResponse =
                serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
            Err(get::Error::DefaultResponse {
                status_code,
                value: rsp_value,
            })
        }
    }
}
/// Error type for the sibling [`get`] operation.
pub mod get {
    use crate::{models, models::*};
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::ErrorResponse,
        },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
/// `PUT {base_path}/subscriptions/{sub}/resourceGroups/{rg}/providers/microsoft.insights/actionGroups/{name}`
/// with `action_group` serialized as the JSON request body.
///
/// `200 OK` and `201 Created` both deserialize into [`ActionGroupResource`] and are
/// distinguished by the [`create_or_update::Response`] variant.
pub async fn create_or_update(
    operation_config: &crate::OperationConfig,
    resource_group_name: &str,
    action_group_name: &str,
    action_group: &ActionGroupResource,
    subscription_id: &str,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/microsoft.insights/actionGroups/{}",
        operation_config.base_path(),
        subscription_id,
        resource_group_name,
        action_group_name
    );
    let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
    let mut req_builder = http::request::Builder::new();
    req_builder = req_builder.method(http::Method::PUT);
    // A bearer token is attached only when a credential was configured.
    if let Some(token_credential) = operation_config.token_credential() {
        let token_response = token_credential
            .get_token(operation_config.token_credential_resource())
            .await
            .map_err(create_or_update::Error::GetTokenError)?;
        req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
    req_builder = req_builder.header("content-type", "application/json");
    let req_body = azure_core::to_json(action_group).map_err(create_or_update::Error::SerializeError)?;
    req_builder = req_builder.uri(url.as_str());
    let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
    let rsp = http_client
        .execute_request(req)
        .await
        .map_err(create_or_update::Error::ExecuteRequestError)?;
    match rsp.status() {
        http::StatusCode::OK => {
            let rsp_body = rsp.body();
            let rsp_value: ActionGroupResource = serde_json::from_slice(rsp_body)
                .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
            Ok(create_or_update::Response::Ok200(rsp_value))
        }
        http::StatusCode::CREATED => {
            let rsp_body = rsp.body();
            let rsp_value: ActionGroupResource = serde_json::from_slice(rsp_body)
                .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
            Ok(create_or_update::Response::Created201(rsp_value))
        }
        status_code => {
            let rsp_body = rsp.body();
            let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
                .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
            Err(create_or_update::Error::DefaultResponse {
                status_code,
                value: rsp_value,
            })
        }
    }
}
/// Response and error types for the sibling [`create_or_update`] operation.
pub mod create_or_update {
    use crate::{models, models::*};
    /// Success outcomes: `200 OK` (updated) vs `201 Created`.
    #[derive(Debug)]
    pub enum Response {
        Ok200(ActionGroupResource),
        Created201(ActionGroupResource),
    }
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::ErrorResponse,
        },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
/// `PATCH {base_path}/subscriptions/{sub}/resourceGroups/{rg}/providers/microsoft.insights/actionGroups/{name}`
/// with `action_group_patch` serialized as the JSON request body.
///
/// Deserializes a `200 OK` body into [`ActionGroupResource`]; any other status
/// becomes [`update::Error::DefaultResponse`].
pub async fn update(
    operation_config: &crate::OperationConfig,
    subscription_id: &str,
    resource_group_name: &str,
    action_group_name: &str,
    action_group_patch: &ActionGroupPatchBody,
) -> std::result::Result<ActionGroupResource, update::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/microsoft.insights/actionGroups/{}",
        operation_config.base_path(),
        subscription_id,
        resource_group_name,
        action_group_name
    );
    let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
    let mut req_builder = http::request::Builder::new();
    req_builder = req_builder.method(http::Method::PATCH);
    // A bearer token is attached only when a credential was configured.
    if let Some(token_credential) = operation_config.token_credential() {
        let token_response = token_credential
            .get_token(operation_config.token_credential_resource())
            .await
            .map_err(update::Error::GetTokenError)?;
        req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
    req_builder = req_builder.header("content-type", "application/json");
    let req_body = azure_core::to_json(action_group_patch).map_err(update::Error::SerializeError)?;
    req_builder = req_builder.uri(url.as_str());
    let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
    let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
    match rsp.status() {
        http::StatusCode::OK => {
            let rsp_body = rsp.body();
            let rsp_value: ActionGroupResource =
                serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
            Ok(rsp_value)
        }
        status_code => {
            let rsp_body = rsp.body();
            let rsp_value: ErrorResponse =
                serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
            Err(update::Error::DefaultResponse {
                status_code,
                value: rsp_value,
            })
        }
    }
}
/// Error type for the sibling [`update`] operation.
pub mod update {
    use crate::{models, models::*};
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::ErrorResponse,
        },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
/// `DELETE {base_path}/subscriptions/{sub}/resourceGroups/{rg}/providers/microsoft.insights/actionGroups/{name}`.
///
/// `200 OK` and `204 No Content` both count as success and carry no body;
/// they are distinguished by the [`delete::Response`] variant.
pub async fn delete(
    operation_config: &crate::OperationConfig,
    resource_group_name: &str,
    action_group_name: &str,
    subscription_id: &str,
) -> std::result::Result<delete::Response, delete::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/microsoft.insights/actionGroups/{}",
        operation_config.base_path(),
        subscription_id,
        resource_group_name,
        action_group_name
    );
    let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
    let mut req_builder = http::request::Builder::new();
    req_builder = req_builder.method(http::Method::DELETE);
    // A bearer token is attached only when a credential was configured.
    if let Some(token_credential) = operation_config.token_credential() {
        let token_response = token_credential
            .get_token(operation_config.token_credential_resource())
            .await
            .map_err(delete::Error::GetTokenError)?;
        req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
    let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
    req_builder = req_builder.uri(url.as_str());
    let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
    let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
    match rsp.status() {
        http::StatusCode::OK => Ok(delete::Response::Ok200),
        http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
        status_code => {
            let rsp_body = rsp.body();
            let rsp_value: ErrorResponse =
                serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
            Err(delete::Error::DefaultResponse {
                status_code,
                value: rsp_value,
            })
        }
    }
}
/// Response and error types for the sibling [`delete`] operation.
pub mod delete {
    use crate::{models, models::*};
    /// Success outcomes: `200 OK` vs `204 No Content` (both body-less).
    #[derive(Debug)]
    pub enum Response {
        Ok200,
        NoContent204,
    }
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::ErrorResponse,
        },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
/// `GET {base_path}/subscriptions/{sub}/providers/microsoft.insights/actionGroups`.
///
/// Deserializes a `200 OK` body into [`ActionGroupList`]; any other status
/// becomes [`list_by_subscription_id::Error::DefaultResponse`].
pub async fn list_by_subscription_id(
    operation_config: &crate::OperationConfig,
    subscription_id: &str,
) -> std::result::Result<ActionGroupList, list_by_subscription_id::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!(
        "{}/subscriptions/{}/providers/microsoft.insights/actionGroups",
        operation_config.base_path(),
        subscription_id
    );
    let mut url = url::Url::parse(url_str).map_err(list_by_subscription_id::Error::ParseUrlError)?;
    let mut req_builder = http::request::Builder::new();
    req_builder = req_builder.method(http::Method::GET);
    // A bearer token is attached only when a credential was configured.
    if let Some(token_credential) = operation_config.token_credential() {
        let token_response = token_credential
            .get_token(operation_config.token_credential_resource())
            .await
            .map_err(list_by_subscription_id::Error::GetTokenError)?;
        req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
    let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
    req_builder = req_builder.uri(url.as_str());
    let req = req_builder
        .body(req_body)
        .map_err(list_by_subscription_id::Error::BuildRequestError)?;
    let rsp = http_client
        .execute_request(req)
        .await
        .map_err(list_by_subscription_id::Error::ExecuteRequestError)?;
    match rsp.status() {
        http::StatusCode::OK => {
            let rsp_body = rsp.body();
            let rsp_value: ActionGroupList = serde_json::from_slice(rsp_body)
                .map_err(|source| list_by_subscription_id::Error::DeserializeError(source, rsp_body.clone()))?;
            Ok(rsp_value)
        }
        status_code => {
            let rsp_body = rsp.body();
            let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
                .map_err(|source| list_by_subscription_id::Error::DeserializeError(source, rsp_body.clone()))?;
            Err(list_by_subscription_id::Error::DefaultResponse {
                status_code,
                value: rsp_value,
            })
        }
    }
}
/// Error type for the sibling [`list_by_subscription_id`] operation.
pub mod list_by_subscription_id {
    use crate::{models, models::*};
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::ErrorResponse,
        },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
/// `GET {base_path}/subscriptions/{sub}/resourceGroups/{rg}/providers/microsoft.insights/actionGroups`.
///
/// Deserializes a `200 OK` body into [`ActionGroupList`]; any other status
/// becomes [`list_by_resource_group::Error::DefaultResponse`].
pub async fn list_by_resource_group(
    operation_config: &crate::OperationConfig,
    resource_group_name: &str,
    subscription_id: &str,
) -> std::result::Result<ActionGroupList, list_by_resource_group::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/microsoft.insights/actionGroups",
        operation_config.base_path(),
        subscription_id,
        resource_group_name
    );
    let mut url = url::Url::parse(url_str).map_err(list_by_resource_group::Error::ParseUrlError)?;
    let mut req_builder = http::request::Builder::new();
    req_builder = req_builder.method(http::Method::GET);
    // A bearer token is attached only when a credential was configured.
    if let Some(token_credential) = operation_config.token_credential() {
        let token_response = token_credential
            .get_token(operation_config.token_credential_resource())
            .await
            .map_err(list_by_resource_group::Error::GetTokenError)?;
        req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
    let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
    req_builder = req_builder.uri(url.as_str());
    let req = req_builder
        .body(req_body)
        .map_err(list_by_resource_group::Error::BuildRequestError)?;
    let rsp = http_client
        .execute_request(req)
        .await
        .map_err(list_by_resource_group::Error::ExecuteRequestError)?;
    match rsp.status() {
        http::StatusCode::OK => {
            let rsp_body = rsp.body();
            let rsp_value: ActionGroupList = serde_json::from_slice(rsp_body)
                .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?;
            Ok(rsp_value)
        }
        status_code => {
            let rsp_body = rsp.body();
            let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
                .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?;
            Err(list_by_resource_group::Error::DefaultResponse {
                status_code,
                value: rsp_value,
            })
        }
    }
}
/// Error type for the sibling [`list_by_resource_group`] operation.
pub mod list_by_resource_group {
    use crate::{models, models::*};
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::ErrorResponse,
        },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
/// `POST {base_path}/subscriptions/{sub}/resourceGroups/{rg}/providers/microsoft.insights/actionGroups/{name}/subscribe`
/// with `enable_request` serialized as the JSON request body.
///
/// Returns `Ok(())` on `200 OK`. A `409 Conflict` is mapped to the dedicated
/// [`enable_receiver::Error::Conflict409`] variant (no body is read for it);
/// every other status becomes [`enable_receiver::Error::DefaultResponse`].
pub async fn enable_receiver(
    operation_config: &crate::OperationConfig,
    resource_group_name: &str,
    action_group_name: &str,
    enable_request: &EnableRequest,
    subscription_id: &str,
) -> std::result::Result<(), enable_receiver::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/microsoft.insights/actionGroups/{}/subscribe",
        operation_config.base_path(),
        subscription_id,
        resource_group_name,
        action_group_name
    );
    let mut url = url::Url::parse(url_str).map_err(enable_receiver::Error::ParseUrlError)?;
    let mut req_builder = http::request::Builder::new();
    req_builder = req_builder.method(http::Method::POST);
    // A bearer token is attached only when a credential was configured.
    if let Some(token_credential) = operation_config.token_credential() {
        let token_response = token_credential
            .get_token(operation_config.token_credential_resource())
            .await
            .map_err(enable_receiver::Error::GetTokenError)?;
        req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
    req_builder = req_builder.header("content-type", "application/json");
    let req_body = azure_core::to_json(enable_request).map_err(enable_receiver::Error::SerializeError)?;
    req_builder = req_builder.uri(url.as_str());
    let req = req_builder.body(req_body).map_err(enable_receiver::Error::BuildRequestError)?;
    let rsp = http_client
        .execute_request(req)
        .await
        .map_err(enable_receiver::Error::ExecuteRequestError)?;
    match rsp.status() {
        http::StatusCode::OK => Ok(()),
        http::StatusCode::CONFLICT => Err(enable_receiver::Error::Conflict409 {}),
        status_code => {
            let rsp_body = rsp.body();
            let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
                .map_err(|source| enable_receiver::Error::DeserializeError(source, rsp_body.clone()))?;
            Err(enable_receiver::Error::DefaultResponse {
                status_code,
                value: rsp_value,
            })
        }
    }
}
pub mod enable_receiver {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Error response #response_type")]
Conflict409 {},
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
pub mod activity_log_alerts {
use crate::models::*;
/// `GET {base_path}/subscriptions/{sub}/resourceGroups/{rg}/providers/microsoft.insights/activityLogAlerts/{name}`.
///
/// Deserializes a `200 OK` body into [`ActivityLogAlertResource`]; any other
/// status becomes [`get::Error::DefaultResponse`].
pub async fn get(
    operation_config: &crate::OperationConfig,
    subscription_id: &str,
    resource_group_name: &str,
    activity_log_alert_name: &str,
) -> std::result::Result<ActivityLogAlertResource, get::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/microsoft.insights/activityLogAlerts/{}",
        operation_config.base_path(),
        subscription_id,
        resource_group_name,
        activity_log_alert_name
    );
    let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
    let mut req_builder = http::request::Builder::new();
    req_builder = req_builder.method(http::Method::GET);
    // A bearer token is attached only when a credential was configured.
    if let Some(token_credential) = operation_config.token_credential() {
        let token_response = token_credential
            .get_token(operation_config.token_credential_resource())
            .await
            .map_err(get::Error::GetTokenError)?;
        req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
    let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
    req_builder = req_builder.uri(url.as_str());
    let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
    let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
    match rsp.status() {
        http::StatusCode::OK => {
            let rsp_body = rsp.body();
            let rsp_value: ActivityLogAlertResource =
                serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
            Ok(rsp_value)
        }
        status_code => {
            let rsp_body = rsp.body();
            let rsp_value: ErrorResponse =
                serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
            Err(get::Error::DefaultResponse {
                status_code,
                value: rsp_value,
            })
        }
    }
}
/// Error type for the sibling [`get`] operation.
pub mod get {
    use crate::{models, models::*};
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::ErrorResponse,
        },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
/// `PUT {base_path}/subscriptions/{sub}/resourceGroups/{rg}/providers/microsoft.insights/activityLogAlerts/{name}`
/// with `activity_log_alert` serialized as the JSON request body.
///
/// `200 OK` and `201 Created` both deserialize into [`ActivityLogAlertResource`]
/// and are distinguished by the [`create_or_update::Response`] variant.
pub async fn create_or_update(
    operation_config: &crate::OperationConfig,
    subscription_id: &str,
    resource_group_name: &str,
    activity_log_alert_name: &str,
    activity_log_alert: &ActivityLogAlertResource,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/microsoft.insights/activityLogAlerts/{}",
        operation_config.base_path(),
        subscription_id,
        resource_group_name,
        activity_log_alert_name
    );
    let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
    let mut req_builder = http::request::Builder::new();
    req_builder = req_builder.method(http::Method::PUT);
    // A bearer token is attached only when a credential was configured.
    if let Some(token_credential) = operation_config.token_credential() {
        let token_response = token_credential
            .get_token(operation_config.token_credential_resource())
            .await
            .map_err(create_or_update::Error::GetTokenError)?;
        req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
    req_builder = req_builder.header("content-type", "application/json");
    let req_body = azure_core::to_json(activity_log_alert).map_err(create_or_update::Error::SerializeError)?;
    req_builder = req_builder.uri(url.as_str());
    let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
    let rsp = http_client
        .execute_request(req)
        .await
        .map_err(create_or_update::Error::ExecuteRequestError)?;
    match rsp.status() {
        http::StatusCode::OK => {
            let rsp_body = rsp.body();
            let rsp_value: ActivityLogAlertResource = serde_json::from_slice(rsp_body)
                .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
            Ok(create_or_update::Response::Ok200(rsp_value))
        }
        http::StatusCode::CREATED => {
            let rsp_body = rsp.body();
            let rsp_value: ActivityLogAlertResource = serde_json::from_slice(rsp_body)
                .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
            Ok(create_or_update::Response::Created201(rsp_value))
        }
        status_code => {
            let rsp_body = rsp.body();
            let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
                .map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
            Err(create_or_update::Error::DefaultResponse {
                status_code,
                value: rsp_value,
            })
        }
    }
}
/// Response and error types for the sibling [`create_or_update`] operation.
pub mod create_or_update {
    use crate::{models, models::*};
    /// Success outcomes: `200 OK` (updated) vs `201 Created`.
    #[derive(Debug)]
    pub enum Response {
        Ok200(ActivityLogAlertResource),
        Created201(ActivityLogAlertResource),
    }
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::ErrorResponse,
        },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
/// `PATCH {base_path}/subscriptions/{sub}/resourceGroups/{rg}/providers/microsoft.insights/activityLogAlerts/{name}`
/// with `activity_log_alert_patch` serialized as the JSON request body.
///
/// Deserializes a `200 OK` body into [`ActivityLogAlertResource`]; any other
/// status becomes [`update::Error::DefaultResponse`].
pub async fn update(
    operation_config: &crate::OperationConfig,
    subscription_id: &str,
    resource_group_name: &str,
    activity_log_alert_name: &str,
    activity_log_alert_patch: &ActivityLogAlertPatchBody,
) -> std::result::Result<ActivityLogAlertResource, update::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/microsoft.insights/activityLogAlerts/{}",
        operation_config.base_path(),
        subscription_id,
        resource_group_name,
        activity_log_alert_name
    );
    let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
    let mut req_builder = http::request::Builder::new();
    req_builder = req_builder.method(http::Method::PATCH);
    // A bearer token is attached only when a credential was configured.
    if let Some(token_credential) = operation_config.token_credential() {
        let token_response = token_credential
            .get_token(operation_config.token_credential_resource())
            .await
            .map_err(update::Error::GetTokenError)?;
        req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
    req_builder = req_builder.header("content-type", "application/json");
    let req_body = azure_core::to_json(activity_log_alert_patch).map_err(update::Error::SerializeError)?;
    req_builder = req_builder.uri(url.as_str());
    let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
    let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
    match rsp.status() {
        http::StatusCode::OK => {
            let rsp_body = rsp.body();
            let rsp_value: ActivityLogAlertResource =
                serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
            Ok(rsp_value)
        }
        status_code => {
            let rsp_body = rsp.body();
            let rsp_value: ErrorResponse =
                serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
            Err(update::Error::DefaultResponse {
                status_code,
                value: rsp_value,
            })
        }
    }
}
/// Error type for the sibling [`update`] operation.
pub mod update {
    use crate::{models, models::*};
    #[derive(Debug, thiserror :: Error)]
    pub enum Error {
        #[error("HTTP status code {}", status_code)]
        DefaultResponse {
            status_code: http::StatusCode,
            value: models::ErrorResponse,
        },
        #[error("Failed to parse request URL: {0}")]
        ParseUrlError(url::ParseError),
        #[error("Failed to build request: {0}")]
        BuildRequestError(http::Error),
        #[error("Failed to execute request: {0}")]
        ExecuteRequestError(azure_core::HttpError),
        #[error("Failed to serialize request body: {0}")]
        SerializeError(serde_json::Error),
        #[error("Failed to deserialize response: {0}, body: {1:?}")]
        DeserializeError(serde_json::Error, bytes::Bytes),
        #[error("Failed to get access token: {0}")]
        GetTokenError(azure_core::Error),
    }
}
/// `DELETE {base_path}/subscriptions/{sub}/resourceGroups/{rg}/providers/microsoft.insights/activityLogAlerts/{name}`.
///
/// `200 OK` and `204 No Content` both count as success and carry no body;
/// they are distinguished by the [`delete::Response`] variant.
pub async fn delete(
    operation_config: &crate::OperationConfig,
    subscription_id: &str,
    resource_group_name: &str,
    activity_log_alert_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
    let http_client = operation_config.http_client();
    let url_str = &format!(
        "{}/subscriptions/{}/resourceGroups/{}/providers/microsoft.insights/activityLogAlerts/{}",
        operation_config.base_path(),
        subscription_id,
        resource_group_name,
        activity_log_alert_name
    );
    let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
    let mut req_builder = http::request::Builder::new();
    req_builder = req_builder.method(http::Method::DELETE);
    // A bearer token is attached only when a credential was configured.
    if let Some(token_credential) = operation_config.token_credential() {
        let token_response = token_credential
            .get_token(operation_config.token_credential_resource())
            .await
            .map_err(delete::Error::GetTokenError)?;
        req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
    }
    url.query_pairs_mut().append_pair("api-version", operation_config.api_version())
    let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
    req_builder = req_builder.uri(url.as_str());
    let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
    let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
    match rsp.status() {
        http::StatusCode::OK => Ok(delete::Response::Ok200),
        http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
        status_code => {
            let rsp_body = rsp.body();
            let rsp_value: ErrorResponse =
                serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
            Err(delete::Error::DefaultResponse {
                status_code,
                value: rsp_value,
            })
        }
    }
}
pub mod delete {
use crate::{models, models::*};
#[derive(Debug)]
pub enum Response {
Ok200,
NoContent204,
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
    /// Lists all activity log alerts in a subscription.
    ///
    /// Sends `GET .../subscriptions/{subscription_id}/providers/microsoft.insights/activityLogAlerts`
    /// and deserializes a 200 response into `ActivityLogAlertList`; any other status
    /// is parsed as `ErrorResponse` and returned as `DefaultResponse`.
    pub async fn list_by_subscription_id(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
    ) -> std::result::Result<ActivityLogAlertList, list_by_subscription_id::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/providers/microsoft.insights/activityLogAlerts",
            operation_config.base_path(),
            subscription_id
        );
        let mut url = url::Url::parse(url_str).map_err(list_by_subscription_id::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Attach a bearer token only when a credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list_by_subscription_id::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // GET request: empty body.
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder
            .body(req_body)
            .map_err(list_by_subscription_id::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(list_by_subscription_id::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: ActivityLogAlertList = serde_json::from_slice(rsp_body)
                    .map_err(|source| list_by_subscription_id::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
                    .map_err(|source| list_by_subscription_id::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(list_by_subscription_id::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
pub mod list_by_subscription_id {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
    /// Lists all activity log alerts in a resource group.
    ///
    /// Sends `GET .../resourceGroups/{resource_group_name}/providers/microsoft.insights/activityLogAlerts`
    /// and deserializes a 200 response into `ActivityLogAlertList`; any other status
    /// is parsed as `ErrorResponse` and returned as `DefaultResponse`.
    pub async fn list_by_resource_group(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
    ) -> std::result::Result<ActivityLogAlertList, list_by_resource_group::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/microsoft.insights/activityLogAlerts",
            operation_config.base_path(),
            subscription_id,
            resource_group_name
        );
        let mut url = url::Url::parse(url_str).map_err(list_by_resource_group::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Attach a bearer token only when a credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list_by_resource_group::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // GET request: empty body.
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder
            .body(req_body)
            .map_err(list_by_resource_group::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(list_by_resource_group::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: ActivityLogAlertList = serde_json::from_slice(rsp_body)
                    .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
                    .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(list_by_resource_group::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
pub mod list_by_resource_group {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
}
/// Operations over subscription-scoped activity log events.
pub mod activity_logs {
    use crate::models::*;
    /// Lists activity log events for a subscription.
    ///
    /// Sends `GET .../providers/Microsoft.Insights/eventtypes/management/values`.
    /// `filter` is mandatory (`$filter`); `select` (`$select`) is optional and
    /// limits the returned columns. A 200 response is deserialized into
    /// `EventDataCollection`; any other status is returned as `DefaultResponse`.
    pub async fn list(
        operation_config: &crate::OperationConfig,
        filter: &str,
        select: Option<&str>,
        subscription_id: &str,
    ) -> std::result::Result<EventDataCollection, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/providers/Microsoft.Insights/eventtypes/management/values",
            operation_config.base_path(),
            subscription_id
        );
        let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Attach a bearer token only when a credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // $filter is always sent; $select only when provided.
        url.query_pairs_mut().append_pair("$filter", filter);
        if let Some(select) = select {
            url.query_pairs_mut().append_pair("$select", select);
        }
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: EventDataCollection =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: ErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(list::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error type for [`list`].
    pub mod list {
        use crate::{models, models::*};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            // Non-success HTTP status with a parsed service error body.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
/// Operations for listing the available activity-log event categories.
pub mod event_categories {
    use crate::models::*;
    /// Lists all event categories.
    ///
    /// Sends `GET {base_path}/providers/Microsoft.Insights/eventcategories`
    /// (tenant-level path — no subscription segment). A 200 response is
    /// deserialized into `EventCategoryCollection`; any other status is
    /// returned as `DefaultResponse`.
    pub async fn list(operation_config: &crate::OperationConfig) -> std::result::Result<EventCategoryCollection, list::Error> {
        let http_client = operation_config.http_client()
;
        let url_str = &format!("{}/providers/Microsoft.Insights/eventcategories", operation_config.base_path(),);
        let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Attach a bearer token only when a credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: EventCategoryCollection =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: ErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(list::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error type for [`list`].
    pub mod list {
        use crate::{models, models::*};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            // Non-success HTTP status with a parsed service error body.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
/// Operations over tenant-scoped activity log events.
pub mod tenant_activity_logs {
    use crate::models::*;
    /// Lists activity log events at tenant scope.
    ///
    /// Sends `GET {base_path}/providers/Microsoft.Insights/eventtypes/management/values`.
    /// Unlike the subscription-scoped variant, both `filter` (`$filter`) and
    /// `select` (`$select`) are optional here. A 200 response is deserialized
    /// into `EventDataCollection`; any other status is returned as `DefaultResponse`.
    pub async fn list(
        operation_config: &crate::OperationConfig,
        filter: Option<&str>,
        select: Option<&str>,
    ) -> std::result::Result<EventDataCollection, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/providers/Microsoft.Insights/eventtypes/management/values",
            operation_config.base_path(),
        );
        let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Attach a bearer token only when a credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // Optional OData query parameters.
        if let Some(filter) = filter {
            url.query_pairs_mut().append_pair("$filter", filter);
        }
        if let Some(select) = select {
            url.query_pairs_mut().append_pair("$select", select);
        }
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: EventDataCollection =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: ErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(list::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error type for [`list`].
    pub mod list {
        use crate::{models, models::*};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            // Non-success HTTP status with a parsed service error body.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
/// Operations for listing metric definitions of a resource.
pub mod metric_definitions {
    use crate::models::*;
    /// Lists the metric definitions for a resource.
    ///
    /// Sends `GET {base_path}/{resource_uri}/providers/Microsoft.Insights/metricDefinitions`.
    /// `resource_uri` is interpolated into the path as-is (it is the full ARM
    /// resource id). `metricnamespace` is an optional query parameter. A 200
    /// response is deserialized into `MetricDefinitionCollection`.
    pub async fn list(
        operation_config: &crate::OperationConfig,
        resource_uri: &str,
        metricnamespace: Option<&str>,
    ) -> std::result::Result<MetricDefinitionCollection, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/{}/providers/Microsoft.Insights/metricDefinitions",
            operation_config.base_path(),
            resource_uri
        );
        let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Attach a bearer token only when a credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        if let Some(metricnamespace) = metricnamespace {
            url.query_pairs_mut().append_pair("metricnamespace", metricnamespace);
        }
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: MetricDefinitionCollection =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: ErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(list::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error type for [`list`].
    pub mod list {
        use crate::{models, models::*};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            // Non-success HTTP status with a parsed service error body.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
/// Operations for querying metric values of a resource.
pub mod metrics {
    use crate::models::*;
    /// Retrieves metric data for a resource.
    ///
    /// Sends `GET {base_path}/{resource_uri}/providers/Microsoft.Insights/metrics`.
    /// All parameters except `operation_config` and `resource_uri` are optional
    /// query parameters and are appended only when `Some`. The 200 payload is
    /// deserialized into the `Response` model (imported via `crate::models::*`).
    pub async fn list(
        operation_config: &crate::OperationConfig,
        resource_uri: &str,
        timespan: Option<&str>,
        interval: Option<&str>,
        metricnames: Option<&str>,
        aggregation: Option<&str>,
        top: Option<i32>,
        orderby: Option<&str>,
        filter: Option<&str>,
        result_type: Option<&str>,
        metricnamespace: Option<&str>,
    ) -> std::result::Result<Response, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/{}/providers/Microsoft.Insights/metrics",
            operation_config.base_path(),
            resource_uri
        );
        let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Attach a bearer token only when a credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // Optional query parameters, appended only when supplied.
        if let Some(timespan) = timespan {
            url.query_pairs_mut().append_pair("timespan", timespan);
        }
        if let Some(interval) = interval {
            url.query_pairs_mut().append_pair("interval", interval);
        }
        if let Some(metricnames) = metricnames {
            url.query_pairs_mut().append_pair("metricnames", metricnames);
        }
        if let Some(aggregation) = aggregation {
            url.query_pairs_mut().append_pair("aggregation", aggregation);
        }
        if let Some(top) = top {
            // `top` is numeric; serialize it to its decimal string form.
            url.query_pairs_mut().append_pair("top", top.to_string().as_str());
        }
        if let Some(orderby) = orderby {
            url.query_pairs_mut().append_pair("orderby", orderby);
        }
        if let Some(filter) = filter {
            url.query_pairs_mut().append_pair("$filter", filter);
        }
        if let Some(result_type) = result_type {
            url.query_pairs_mut().append_pair("resultType", result_type);
        }
        if let Some(metricnamespace) = metricnamespace {
            url.query_pairs_mut().append_pair("metricnamespace", metricnamespace);
        }
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: Response =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: ErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(list::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error type for [`list`].
    pub mod list {
        use crate::{models, models::*};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            // Non-success HTTP status with a parsed service error body.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
/// Operations for querying metric baselines of a resource.
pub mod baselines {
    use crate::models::*;
    /// Retrieves metric baseline values for a resource.
    ///
    /// Sends `GET {base_path}/{resource_uri}/providers/Microsoft.Insights/metricBaselines`.
    /// All parameters except `operation_config` and `resource_uri` are optional
    /// query parameters and are appended only when `Some`. A 200 response is
    /// deserialized into `MetricBaselinesResponse`.
    pub async fn list(
        operation_config: &crate::OperationConfig,
        resource_uri: &str,
        metricnames: Option<&str>,
        metricnamespace: Option<&str>,
        timespan: Option<&str>,
        interval: Option<&str>,
        aggregation: Option<&str>,
        sensitivities: Option<&str>,
        filter: Option<&str>,
        result_type: Option<&str>,
    ) -> std::result::Result<MetricBaselinesResponse, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/{}/providers/Microsoft.Insights/metricBaselines",
            operation_config.base_path(),
            resource_uri
        );
        let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Attach a bearer token only when a credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // Optional query parameters, appended only when supplied.
        if let Some(metricnames) = metricnames {
            url.query_pairs_mut().append_pair("metricnames", metricnames);
        }
        if let Some(metricnamespace) = metricnamespace {
            url.query_pairs_mut().append_pair("metricnamespace", metricnamespace);
        }
        if let Some(timespan) = timespan {
            url.query_pairs_mut().append_pair("timespan", timespan);
        }
        if let Some(interval) = interval {
            url.query_pairs_mut().append_pair("interval", interval);
        }
        if let Some(aggregation) = aggregation {
            url.query_pairs_mut().append_pair("aggregation", aggregation);
        }
        if let Some(sensitivities) = sensitivities {
            url.query_pairs_mut().append_pair("sensitivities", sensitivities);
        }
        if let Some(filter) = filter {
            url.query_pairs_mut().append_pair("$filter", filter);
        }
        if let Some(result_type) = result_type {
            url.query_pairs_mut().append_pair("resultType", result_type);
        }
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: MetricBaselinesResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: ErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(list::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error type for [`list`].
    pub mod list {
        use crate::{models, models::*};
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            // Non-success HTTP status with a parsed service error body.
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
pub mod metric_alerts {
use crate::models::*;
    /// Lists all metric alert rules in a subscription.
    ///
    /// Sends `GET .../subscriptions/{subscription_id}/providers/Microsoft.Insights/metricAlerts`
    /// and deserializes a 200 response into `MetricAlertResourceCollection`;
    /// any other status is parsed as `ErrorResponse` and returned as `DefaultResponse`.
    pub async fn list_by_subscription(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
    ) -> std::result::Result<MetricAlertResourceCollection, list_by_subscription::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/providers/Microsoft.Insights/metricAlerts",
            operation_config.base_path(),
            subscription_id
        );
        let mut url = url::Url::parse(url_str).map_err(list_by_subscription::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Attach a bearer token only when a credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list_by_subscription::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // GET request: empty body.
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY)
;
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list_by_subscription::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(list_by_subscription::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: MetricAlertResourceCollection = serde_json::from_slice(rsp_body)
                    .map_err(|source| list_by_subscription::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
                    .map_err(|source| list_by_subscription::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(list_by_subscription::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
pub mod list_by_subscription {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
    /// Lists all metric alert rules in a resource group.
    ///
    /// Sends `GET .../resourceGroups/{resource_group_name}/providers/Microsoft.Insights/metricAlerts`
    /// and deserializes a 200 response into `MetricAlertResourceCollection`;
    /// any other status is parsed as `ErrorResponse` and returned as `DefaultResponse`.
    pub async fn list_by_resource_group(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
    ) -> std::result::Result<MetricAlertResourceCollection, list_by_resource_group::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Insights/metricAlerts",
            operation_config.base_path(),
            subscription_id,
            resource_group_name
        );
        let mut url = url::Url::parse(url_str).map_err(list_by_resource_group::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Attach a bearer token only when a credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list_by_resource_group::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // GET request: empty body.
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder
            .body(req_body)
            .map_err(list_by_resource_group::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(list_by_resource_group::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: MetricAlertResourceCollection = serde_json::from_slice(rsp_body)
                    .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
                    .map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(list_by_resource_group::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
pub mod list_by_resource_group {
use crate::{models, models::*};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorResponse,
},
#[error("Failed to parse request URL: {0}")]
ParseUrlError(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequestError(http::Error),
#[error("Failed to execute request: {0}")]
ExecuteRequestError(azure_core::HttpError),
#[error("Failed to serialize request body: {0}")]
SerializeError(serde_json::Error),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
DeserializeError(serde_json::Error, bytes::Bytes),
#[error("Failed to get access token: {0}")]
GetTokenError(azure_core::Error),
}
}
    /// Retrieves a single metric alert rule by name.
    ///
    /// Sends `GET .../resourceGroups/{resource_group_name}/providers/Microsoft.Insights/metricAlerts/{rule_name}`
    /// and deserializes a 200 response into `MetricAlertResource`; any other
    /// status is parsed as `ErrorResponse` and returned as `DefaultResponse`.
    pub async fn get(
        operation_config: &crate::OperationConfig,
        subscription_id: &str,
        resource_group_name: &str,
        rule_name: &str,
    ) -> std::result::Result<MetricAlertResource, get::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Insights/metricAlerts/{}",
            operation_config.base_path(),
            subscription_id,
            resource_group_name,
            rule_name
        );
        let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Attach a bearer token only when a credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(get::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // GET request: empty body.
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: MetricAlertResource =
                    serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: ErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(get::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    pub mod get {
        use crate::{models, models::*};
        /// Errors produced by the `get` operation.
        ///
        /// `DefaultResponse` carries the service-reported error payload for any
        /// non-success HTTP status; the remaining variants describe client-side
        /// failures in the request/response pipeline.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
rule_name: &str,
parameters: &MetricAlertResource,
) -> std::result::Result<MetricAlertResource, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Insights/metricAlerts/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
rule_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: MetricAlertResource = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(create_or_update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
    pub mod create_or_update {
        use crate::{models, models::*};
        /// Errors produced by the `create_or_update` operation.
        ///
        /// `DefaultResponse` carries the service-reported error payload for any
        /// non-success HTTP status; the remaining variants describe client-side
        /// failures in the request/response pipeline.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
rule_name: &str,
parameters: &MetricAlertResourcePatch,
) -> std::result::Result<MetricAlertResource, update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Insights/metricAlerts/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
rule_name
);
let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: MetricAlertResource =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
    pub mod update {
        use crate::{models, models::*};
        /// Errors produced by the `update` operation.
        ///
        /// `DefaultResponse` carries the service-reported error payload for any
        /// non-success HTTP status; the remaining variants describe client-side
        /// failures in the request/response pipeline.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn delete(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
rule_name: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Insights/metricAlerts/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
rule_name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(delete::Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
Err(delete::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
    pub mod delete {
        use crate::{models, models::*};
        /// Success outcomes of the `delete` operation.
        #[derive(Debug)]
        pub enum Response {
            /// HTTP 200 OK.
            Ok200,
            /// HTTP 204 No Content.
            NoContent204,
        }
        /// Errors produced by the `delete` operation.
        ///
        /// `DefaultResponse` carries the service-reported error payload for any
        /// non-success HTTP status; the remaining variants describe client-side
        /// failures in the request/response pipeline.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
pub mod metric_alerts_status {
use crate::models::*;
pub async fn list(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
rule_name: &str,
) -> std::result::Result<MetricAlertStatusCollection, list::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Insights/metricAlerts/{}/status",
operation_config.base_path(),
subscription_id,
resource_group_name,
rule_name
);
let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: MetricAlertStatusCollection =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
    pub mod list {
        use crate::{models, models::*};
        /// Errors produced by the `list` operation.
        ///
        /// `DefaultResponse` carries the service-reported error payload for any
        /// non-success HTTP status; the remaining variants describe client-side
        /// failures in the request/response pipeline.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn list_by_name(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
rule_name: &str,
status_name: &str,
) -> std::result::Result<MetricAlertStatusCollection, list_by_name::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Insights/metricAlerts/{}/status/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
rule_name,
status_name
);
let mut url = url::Url::parse(url_str).map_err(list_by_name::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_by_name::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_by_name::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_by_name::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: MetricAlertStatusCollection =
serde_json::from_slice(rsp_body).map_err(|source| list_by_name::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorResponse =
serde_json::from_slice(rsp_body).map_err(|source| list_by_name::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list_by_name::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
    pub mod list_by_name {
        use crate::{models, models::*};
        /// Errors produced by the `list_by_name` operation.
        ///
        /// `DefaultResponse` carries the service-reported error payload for any
        /// non-success HTTP status; the remaining variants describe client-side
        /// failures in the request/response pipeline.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
pub mod scheduled_query_rules {
use crate::models::*;
pub async fn get(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
rule_name: &str,
subscription_id: &str,
) -> std::result::Result<LogSearchRuleResource, get::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.Insights/scheduledQueryRules/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
rule_name
);
let mut url = url::Url::parse(url_str).map_err(get::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(get::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(get::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(get::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: LogSearchRuleResource =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorContract =
serde_json::from_slice(rsp_body).map_err(|source| get::Error::DeserializeError(source, rsp_body.clone()))?;
Err(get::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
    pub mod get {
        use crate::{models, models::*};
        /// Errors produced by the `get` operation.
        ///
        /// `DefaultResponse` carries the service-reported error contract for any
        /// non-success HTTP status; the remaining variants describe client-side
        /// failures in the request/response pipeline.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorContract,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn create_or_update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
rule_name: &str,
parameters: &LogSearchRuleResource,
) -> std::result::Result<create_or_update::Response, create_or_update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.Insights/scheduledQueryRules/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
rule_name
);
let mut url = url::Url::parse(url_str).map_err(create_or_update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(create_or_update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(create_or_update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(create_or_update::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(create_or_update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: LogSearchRuleResource = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = rsp.body();
let rsp_value: LogSearchRuleResource = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(create_or_update::Response::Created201(rsp_value))
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorContract = serde_json::from_slice(rsp_body)
.map_err(|source| create_or_update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(create_or_update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
    pub mod create_or_update {
        use crate::{models, models::*};
        /// Success outcomes of the `create_or_update` operation.
        #[derive(Debug)]
        pub enum Response {
            /// HTTP 200 OK with the resulting rule.
            Ok200(LogSearchRuleResource),
            /// HTTP 201 Created with the resulting rule.
            Created201(LogSearchRuleResource),
        }
        /// Errors produced by the `create_or_update` operation.
        ///
        /// `DefaultResponse` carries the service-reported error contract for any
        /// non-success HTTP status; the remaining variants describe client-side
        /// failures in the request/response pipeline.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorContract,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn update(
operation_config: &crate::OperationConfig,
subscription_id: &str,
resource_group_name: &str,
rule_name: &str,
parameters: &LogSearchRuleResourcePatch,
) -> std::result::Result<LogSearchRuleResource, update::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.Insights/scheduledQueryRules/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
rule_name
);
let mut url = url::Url::parse(url_str).map_err(update::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PATCH);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(update::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
req_builder = req_builder.header("content-type", "application/json");
let req_body = azure_core::to_json(parameters).map_err(update::Error::SerializeError)?;
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(update::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(update::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: LogSearchRuleResource =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorContract =
serde_json::from_slice(rsp_body).map_err(|source| update::Error::DeserializeError(source, rsp_body.clone()))?;
Err(update::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
    pub mod update {
        use crate::{models, models::*};
        /// Errors produced by the `update` operation.
        ///
        /// `DefaultResponse` carries the service-reported error contract for any
        /// non-success HTTP status; the remaining variants describe client-side
        /// failures in the request/response pipeline.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorContract,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn delete(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
rule_name: &str,
subscription_id: &str,
) -> std::result::Result<delete::Response, delete::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.Insights/scheduledQueryRules/{}",
operation_config.base_path(),
subscription_id,
resource_group_name,
rule_name
);
let mut url = url::Url::parse(url_str).map_err(delete::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(delete::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(delete::Error::BuildRequestError)?;
let rsp = http_client.execute_request(req).await.map_err(delete::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => Ok(delete::Response::Ok200),
http::StatusCode::NO_CONTENT => Ok(delete::Response::NoContent204),
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorContract =
serde_json::from_slice(rsp_body).map_err(|source| delete::Error::DeserializeError(source, rsp_body.clone()))?;
Err(delete::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
    pub mod delete {
        use crate::{models, models::*};
        /// Success outcomes of the `delete` operation.
        #[derive(Debug)]
        pub enum Response {
            /// HTTP 200 OK.
            Ok200,
            /// HTTP 204 No Content.
            NoContent204,
        }
        /// Errors produced by the `delete` operation.
        ///
        /// `DefaultResponse` carries the service-reported error contract for any
        /// non-success HTTP status; the remaining variants describe client-side
        /// failures in the request/response pipeline.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorContract,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn list_by_subscription(
operation_config: &crate::OperationConfig,
filter: Option<&str>,
subscription_id: &str,
) -> std::result::Result<LogSearchRuleResourceCollection, list_by_subscription::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Insights/scheduledQueryRules",
operation_config.base_path(),
subscription_id
);
let mut url = url::Url::parse(url_str).map_err(list_by_subscription::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_by_subscription::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(list_by_subscription::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_by_subscription::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: LogSearchRuleResourceCollection = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_subscription::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorContract = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_subscription::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list_by_subscription::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
    pub mod list_by_subscription {
        use crate::{models, models::*};
        /// Errors produced by the `list_by_subscription` operation.
        ///
        /// `DefaultResponse` carries the service-reported error contract for any
        /// non-success HTTP status; the remaining variants describe client-side
        /// failures in the request/response pipeline.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorContract,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
pub async fn list_by_resource_group(
operation_config: &crate::OperationConfig,
resource_group_name: &str,
filter: Option<&str>,
subscription_id: &str,
) -> std::result::Result<LogSearchRuleResourceCollection, list_by_resource_group::Error> {
let http_client = operation_config.http_client();
let url_str = &format!(
"{}/subscriptions/{}/resourcegroups/{}/providers/Microsoft.Insights/scheduledQueryRules",
operation_config.base_path(),
subscription_id,
resource_group_name
);
let mut url = url::Url::parse(url_str).map_err(list_by_resource_group::Error::ParseUrlError)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
if let Some(token_credential) = operation_config.token_credential() {
let token_response = token_credential
.get_token(operation_config.token_credential_resource())
.await
.map_err(list_by_resource_group::Error::GetTokenError)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
}
url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
if let Some(filter) = filter {
url.query_pairs_mut().append_pair("$filter", filter);
}
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder
.body(req_body)
.map_err(list_by_resource_group::Error::BuildRequestError)?;
let rsp = http_client
.execute_request(req)
.await
.map_err(list_by_resource_group::Error::ExecuteRequestError)?;
match rsp.status() {
http::StatusCode::OK => {
let rsp_body = rsp.body();
let rsp_value: LogSearchRuleResourceCollection = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = rsp.body();
let rsp_value: ErrorContract = serde_json::from_slice(rsp_body)
.map_err(|source| list_by_resource_group::Error::DeserializeError(source, rsp_body.clone()))?;
Err(list_by_resource_group::Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
}
    pub mod list_by_resource_group {
        use crate::{models, models::*};
        /// Errors produced by the `list_by_resource_group` operation.
        ///
        /// `DefaultResponse` carries the service-reported error contract for any
        /// non-success HTTP status; the remaining variants describe client-side
        /// failures in the request/response pipeline.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorContract,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
/// Operations on the `microsoft.insights/metricNamespaces` endpoint.
pub mod metric_namespaces {
    use crate::models::*;
    /// Lists the metric namespaces available for `resource_uri` via a GET
    /// request; `start_time` is passed through as the optional `startTime`
    /// query parameter.
    pub async fn list(
        operation_config: &crate::OperationConfig,
        resource_uri: &str,
        start_time: Option<&str>,
    ) -> std::result::Result<MetricNamespaceCollection, list::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/{}/providers/microsoft.insights/metricNamespaces",
            operation_config.base_path(),
            resource_uri
        );
        let mut url = url::Url::parse(url_str).map_err(list::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Bearer auth is only attached when a credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(list::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        if let Some(start_time) = start_time {
            url.query_pairs_mut().append_pair("startTime", start_time);
        }
        // GET request: always an empty body.
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder.body(req_body).map_err(list::Error::BuildRequestError)?;
        let rsp = http_client.execute_request(req).await.map_err(list::Error::ExecuteRequestError)?;
        // 200 decodes into the success model; anything else decodes into the
        // service's error model and is surfaced as `DefaultResponse`.
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: MetricNamespaceCollection =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: ErrorResponse =
                    serde_json::from_slice(rsp_body).map_err(|source| list::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(list::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error namespace for [`list`].
    pub mod list {
        use crate::{models, models::*};
        /// Failure modes of the request lifecycle for `list`.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ErrorResponse,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
/// Operations on the `Microsoft.Insights/vmInsightsOnboardingStatuses` endpoint.
pub mod vm_insights {
    use crate::models::*;
    /// Fetches the VM Insights onboarding status for `resource_uri` via a GET
    /// request to the `.../vmInsightsOnboardingStatuses/default` resource.
    pub async fn get_onboarding_status(
        operation_config: &crate::OperationConfig,
        resource_uri: &str,
    ) -> std::result::Result<VmInsightsOnboardingStatus, get_onboarding_status::Error> {
        let http_client = operation_config.http_client();
        let url_str = &format!(
            "{}/{}/providers/Microsoft.Insights/vmInsightsOnboardingStatuses/default",
            operation_config.base_path(),
            resource_uri
        );
        let mut url = url::Url::parse(url_str).map_err(get_onboarding_status::Error::ParseUrlError)?;
        let mut req_builder = http::request::Builder::new();
        req_builder = req_builder.method(http::Method::GET);
        // Bearer auth is only attached when a credential is configured.
        if let Some(token_credential) = operation_config.token_credential() {
            let token_response = token_credential
                .get_token(operation_config.token_credential_resource())
                .await
                .map_err(get_onboarding_status::Error::GetTokenError)?;
            req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
        }
        url.query_pairs_mut().append_pair("api-version", operation_config.api_version());
        // GET request: always an empty body.
        let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
        req_builder = req_builder.uri(url.as_str());
        let req = req_builder
            .body(req_body)
            .map_err(get_onboarding_status::Error::BuildRequestError)?;
        let rsp = http_client
            .execute_request(req)
            .await
            .map_err(get_onboarding_status::Error::ExecuteRequestError)?;
        // 200 decodes into the success model; anything else decodes into the
        // service's error model and is surfaced as `DefaultResponse`.
        match rsp.status() {
            http::StatusCode::OK => {
                let rsp_body = rsp.body();
                let rsp_value: VmInsightsOnboardingStatus = serde_json::from_slice(rsp_body)
                    .map_err(|source| get_onboarding_status::Error::DeserializeError(source, rsp_body.clone()))?;
                Ok(rsp_value)
            }
            status_code => {
                let rsp_body = rsp.body();
                let rsp_value: ResponseWithError = serde_json::from_slice(rsp_body)
                    .map_err(|source| get_onboarding_status::Error::DeserializeError(source, rsp_body.clone()))?;
                Err(get_onboarding_status::Error::DefaultResponse {
                    status_code,
                    value: rsp_value,
                })
            }
        }
    }
    /// Error namespace for [`get_onboarding_status`].
    pub mod get_onboarding_status {
        use crate::{models, models::*};
        /// Failure modes of the request lifecycle for `get_onboarding_status`.
        #[derive(Debug, thiserror :: Error)]
        pub enum Error {
            #[error("HTTP status code {}", status_code)]
            DefaultResponse {
                status_code: http::StatusCode,
                value: models::ResponseWithError,
            },
            #[error("Failed to parse request URL: {0}")]
            ParseUrlError(url::ParseError),
            #[error("Failed to build request: {0}")]
            BuildRequestError(http::Error),
            #[error("Failed to execute request: {0}")]
            ExecuteRequestError(azure_core::HttpError),
            #[error("Failed to serialize request body: {0}")]
            SerializeError(serde_json::Error),
            #[error("Failed to deserialize response: {0}, body: {1:?}")]
            DeserializeError(serde_json::Error, bytes::Bytes),
            #[error("Failed to get access token: {0}")]
            GetTokenError(azure_core::Error),
        }
    }
}
| 47.863545 | 136 | 0.58454 |
62ae2e624caa028f37342f12c1fccff467b1d7b2
| 8,122 |
use darling::ast::{Data, Style};
use proc_macro::TokenStream;
use proc_macro2::{Ident, Span};
use quote::quote;
use std::collections::HashSet;
use syn::visit_mut::VisitMut;
use syn::{visit_mut, Error, Lifetime, Type};
use crate::args::{self, RenameTarget};
use crate::utils::{get_crate_name, get_rustdoc, GeneratorResult};
/// Expands the `Union` derive: generates `Type`, `ContainerType`,
/// `OutputValueType` and `UnionType` impls plus `From<variant>` conversions
/// for an enum whose variants are the union's member object types.
///
/// Returns an error unless the input is an enum whose every variant is a
/// single-value tuple variant wrapping a distinct path type.
pub fn generate(union_args: &args::Union) -> GeneratorResult<TokenStream> {
    let crate_name = get_crate_name(union_args.internal);
    let ident = &union_args.ident;
    let generics = &union_args.generics;
    // Only enums can be unions.
    let s = match &union_args.data {
        Data::Enum(s) => s,
        _ => {
            return Err(Error::new_spanned(&ident, "Union can only be applied to an enum.").into())
        }
    };
    let mut enum_names = Vec::new();
    let mut enum_items = HashSet::new();
    let mut type_into_impls = Vec::new();
    // GraphQL type name: explicit `name` attribute or the renamed Rust ident.
    let gql_typename = union_args
        .name
        .clone()
        .unwrap_or_else(|| RenameTarget::Type.rename(ident.to_string()));
    // Doc comment on the enum becomes the GraphQL description, if present.
    let desc = get_rustdoc(&union_args.attrs)?
        .map(|s| quote! { ::std::option::Option::Some(#s) })
        .unwrap_or_else(|| quote! {::std::option::Option::None});
    let mut registry_types = Vec::new();
    let mut possible_types = Vec::new();
    let mut get_introspection_typename = Vec::new();
    let mut collect_all_fields = Vec::new();
    for variant in s {
        let enum_name = &variant.ident;
        // Only `Variant(Type)` shapes are accepted; everything else is rejected
        // with a targeted error message.
        let ty = match variant.fields.style {
            Style::Tuple if variant.fields.fields.len() == 1 => &variant.fields.fields[0],
            Style::Tuple => {
                return Err(Error::new_spanned(
                    enum_name,
                    "Only single value variants are supported",
                )
                .into())
            }
            Style::Unit => {
                return Err(
                    Error::new_spanned(enum_name, "Empty variants are not supported").into(),
                )
            }
            Style::Struct => {
                return Err(Error::new_spanned(
                    enum_name,
                    "Variants with named fields are not supported",
                )
                .into())
            }
        };
        if let Type::Path(p) = &ty {
            // This validates that the field type wasn't already used
            if !enum_items.insert(p) {
                return Err(
                    Error::new_spanned(&ty, "This type already used in another variant").into(),
                );
            }
            enum_names.push(enum_name);
            // Erase lifetimes so the static assertion below compiles for
            // types carrying lifetime parameters.
            struct RemoveLifetime;
            impl VisitMut for RemoveLifetime {
                fn visit_lifetime_mut(&mut self, i: &mut Lifetime) {
                    i.ident = Ident::new("_", Span::call_site());
                    visit_mut::visit_lifetime_mut(self, i);
                }
            }
            let mut assert_ty = p.clone();
            RemoveLifetime.visit_type_path_mut(&mut assert_ty);
            // Non-flattened members must be ObjectType; flattened members must
            // themselves be unions (UnionType). Both get a `From` conversion.
            if !variant.flatten {
                type_into_impls.push(quote! {
                    #crate_name::static_assertions::assert_impl_one!(#assert_ty: #crate_name::ObjectType);
                    #[allow(clippy::all, clippy::pedantic)]
                    impl #generics ::std::convert::From<#p> for #ident #generics {
                        fn from(obj: #p) -> Self {
                            #ident::#enum_name(obj)
                        }
                    }
                });
            } else {
                type_into_impls.push(quote! {
                    #crate_name::static_assertions::assert_impl_one!(#assert_ty: #crate_name::UnionType);
                    #[allow(clippy::all, clippy::pedantic)]
                    impl #generics ::std::convert::From<#p> for #ident #generics {
                        fn from(obj: #p) -> Self {
                            #ident::#enum_name(obj)
                        }
                    }
                });
            }
            if !variant.flatten {
                registry_types.push(quote! {
                    <#p as #crate_name::Type>::create_type_info(registry);
                });
                possible_types.push(quote! {
                    possible_types.insert(<#p as #crate_name::Type>::type_name().into_owned());
                });
            } else {
                // Flattened unions contribute all of their own possible types.
                possible_types.push(quote! {
                    if let #crate_name::registry::MetaType::Union { possible_types: possible_types2, .. } =
                        registry.create_dummy_type::<#p>() {
                        possible_types.extend(possible_types2);
                    }
                });
            }
            if !variant.flatten {
                get_introspection_typename.push(quote! {
                    #ident::#enum_name(obj) => <#p as #crate_name::Type>::type_name()
                });
            } else {
                get_introspection_typename.push(quote! {
                    #ident::#enum_name(obj) => <#p as #crate_name::Type>::introspection_type_name(obj)
                });
            }
            collect_all_fields.push(quote! {
                #ident::#enum_name(obj) => obj.collect_all_fields(ctx, fields)
            });
        } else {
            return Err(Error::new_spanned(ty, "Invalid type").into());
        }
    }
    // The GraphQL spec requires at least one member type.
    if possible_types.is_empty() {
        return Err(Error::new_spanned(
            &ident,
            "A GraphQL Union type must include one or more unique member types.",
        )
        .into());
    }
    let expanded = quote! {
        #(#type_into_impls)*
        #[allow(clippy::all, clippy::pedantic)]
        impl #generics #crate_name::Type for #ident #generics {
            fn type_name() -> ::std::borrow::Cow<'static, ::std::primitive::str> {
                ::std::borrow::Cow::Borrowed(#gql_typename)
            }
            fn introspection_type_name(&self) -> ::std::borrow::Cow<'static, ::std::primitive::str> {
                match self {
                    #(#get_introspection_typename),*
                }
            }
            fn create_type_info(registry: &mut #crate_name::registry::Registry) -> ::std::string::String {
                registry.create_type::<Self, _>(|registry| {
                    #(#registry_types)*
                    #crate_name::registry::MetaType::Union {
                        name: ::std::borrow::ToOwned::to_owned(#gql_typename),
                        description: #desc,
                        possible_types: {
                            let mut possible_types = #crate_name::indexmap::IndexSet::new();
                            #(#possible_types)*
                            possible_types
                        }
                    }
                })
            }
        }
        #[allow(clippy::all, clippy::pedantic)]
        #[#crate_name::async_trait::async_trait(?Send)]
        impl #generics #crate_name::resolver_utils::ContainerType for #ident #generics {
            async fn resolve_field(&self, ctx: &#crate_name::Context<'_>) -> #crate_name::ServerResult<::std::option::Option<#crate_name::Value>> {
                ::std::result::Result::Ok(::std::option::Option::None)
            }
            fn collect_all_fields<'__life>(&'__life self, ctx: &#crate_name::ContextSelectionSet<'__life>, fields: &mut #crate_name::resolver_utils::Fields<'__life>) -> #crate_name::ServerResult<()> {
                match self {
                    #(#collect_all_fields),*
                }
            }
        }
        #[allow(clippy::all, clippy::pedantic)]
        #[#crate_name::async_trait::async_trait(?Send)]
        impl #generics #crate_name::OutputValueType for #ident #generics {
            async fn resolve(&self, ctx: &#crate_name::ContextSelectionSet<'_>, _field: &#crate_name::Positioned<#crate_name::parser::types::Field>) -> #crate_name::ServerResult<#crate_name::Value> {
                #crate_name::resolver_utils::resolve_container(ctx, self).await
            }
        }
        impl #generics #crate_name::UnionType for #ident #generics {}
    };
    Ok(expanded.into())
}
| 38.67619 | 200 | 0.509604 |
5ddcaf6eee405ea3f4f55b1fa3d5b5c926f01a9b
| 6,773 |
//! This module implements the logic needed to apply the proper analysis passes to simplify the IR.
use petgraph::algo::tarjan_scc;
use petgraph::Graph;
use rayon::prelude::*;
use std::any::Any;
use std::collections::HashMap;
use std::fmt::Debug;
use crate::analysis::analyzer;
use crate::analysis::analyzer::{
Action, AnalyzerInfo, AnalyzerKind, Change, FuncAnalyzer, ModuleAnalyzer,
};
use crate::analysis::arithmetic::{ArithChange, Arithmetic};
use crate::analysis::copy_propagation::CopyPropagation;
use crate::analysis::cse::cse::CSE;
use crate::analysis::cse::ssasort::Sorter;
use crate::analysis::dce::DCE;
use crate::analysis::functions::fix_ssa_opcalls::CallSiteFixer;
use crate::analysis::functions::infer_regusage::Inferer;
use crate::analysis::inst_combine::Combiner;
use crate::analysis::interproc::fixcall::CallFixer;
use crate::analysis::sccp::SCCP;
use crate::frontend::radeco_containers::{FunctionKind, RadecoFunction, RadecoModule};
use crate::middle::regfile::SubRegisterFile;
/// Orders `analyzers` so that, where possible, each analyzer runs after the
/// analyzers listed in its `requires` set.
///
/// Dependencies are modelled as edges `dep -> analyzer`; the result is the
/// topological order obtained by reversing Tarjan's SCC output. Analyzers
/// inside a dependency cycle (a non-trivial SCC) come out in arbitrary
/// relative order, since `requires` cannot be honoured there.
fn sort_by_requires(analyzers: &[AnalyzerKind]) -> impl Iterator<Item = AnalyzerKind> {
    // Build the dependency graph; `kind2id` deduplicates nodes per kind.
    let mut graph = Graph::<AnalyzerKind, ()>::new();
    let mut kind2id = HashMap::new();
    for analyzer in analyzers {
        // `NodeIndex` is `Copy`, so dereference instead of cloning.
        let n = *kind2id
            .entry(*analyzer)
            .or_insert_with(|| graph.add_node(*analyzer));
        let info: &'static AnalyzerInfo = From::from(*analyzer);
        for dep in info.requires {
            let d = *kind2id
                .entry(*dep)
                .or_insert_with(|| graph.add_node(*dep));
            graph.add_edge(d, n, ());
        }
    }
    // Compute the reverse topological sorting using the Tarjan's algorithm. If SCCs are present
    // we can safely run the analyzers within the same SCC in arbitrary order because the `requires`
    // property is not garanteed to be respected.
    let sccs = tarjan_scc(&graph);
    sccs.into_iter().flatten().map(move |id| graph[id]).rev()
}
/// Marker for opaque results produced by an [`Engine`] run; currently no
/// engine returns a concrete value (both entry points return `None`).
pub trait EngineResult: Any + Debug {}
/// A driver that applies analysis passes to a whole module or to a single
/// function.
pub trait Engine: Any + Debug {
    /// Runs module-level analyses over `rmod`, using `regfile` for
    /// register-alias lookups.
    fn run_module(
        &self,
        rmod: &mut RadecoModule,
        regfile: &SubRegisterFile,
    ) -> Option<Box<dyn EngineResult>>;
    /// Runs function-level analyses over a single function.
    fn run_func(&self, rfn: &mut RadecoFunction) -> Option<Box<dyn EngineResult>>;
}
/// Radeco's default engine.
#[derive(Debug)]
pub struct RadecoEngine {
    // Upper bound on the number of analysis rounds run per function before
    // giving up on reaching a fixed point.
    max_iteration: u32,
}
impl RadecoEngine {
    /// Creates an engine that runs at most `max_iteration` analysis rounds
    /// per function.
    pub fn new(max_iteration: u32) -> Self {
        // Field-init shorthand instead of `max_iteration: max_iteration`.
        RadecoEngine { max_iteration }
    }
}
impl Engine for RadecoEngine {
fn run_module(
&self,
rmod: &mut RadecoModule,
regfile: &SubRegisterFile,
) -> Option<Box<dyn EngineResult>> {
radeco_trace!("run_module");
// Analyze preserved for all functions.
{
let bp_name = regfile.get_name_by_alias(&"BP".to_string());
let bp_name = bp_name.map(|s| s.to_owned());
let sp_name = regfile.get_name_by_alias(&"SP".to_string());
let sp_name = sp_name.map(|s| s.to_owned());
let mut callfixer = CallFixer::new(rmod, bp_name, sp_name);
callfixer.rounded_analysis();
}
// Fix call sites
let mut call_site_fixer = CallSiteFixer::new();
call_site_fixer.analyze(rmod, None::<fn(_) -> _>);
// Infer calling conventions
let mut inferer = Inferer::new((*regfile).clone());
inferer.analyze(rmod, None::<fn(_) -> _>);
rmod.functions.par_iter_mut().for_each(|(_, rfn)| {
self.run_func(rfn);
});
None
}
fn run_func(&self, rfn: &mut RadecoFunction) -> Option<Box<dyn EngineResult>> {
radeco_trace!("run_func: {}", rfn.name);
// There is no code for imported/relocated functions.
if rfn.kind != FunctionKind::Local {
return None;
}
// Try to convert the condition codes to relational operators. This should be done before
// all the other passes.
let mut arithmetic = Arithmetic::new();
arithmetic.analyze(
rfn,
Some(|change: Box<dyn Change>| {
let change = change.as_any().downcast_ref::<ArithChange>().unwrap();
if change.new_expr.contains("OpEq")
|| change.new_expr.contains("OpGt")
|| change.new_expr.contains("OpLt")
{
Action::Apply
} else {
Action::Skip
}
}),
);
{
// Sort the IR.
let mut sorter = Sorter::new(rfn.ssa_mut());
sorter.run();
}
let mut analyzers = sort_by_requires(&analyzer::all_func_analyzers());
// Run iteratively all the available analyzers until a stable point or the maximum
// number of iterations is reached.
for _ in 0..self.max_iteration {
let mut stable = true;
// Build and run the analyzers.
while let Some(analyzer) = analyzers.next() {
radeco_trace!("running analyzer: {:?}", analyzer);
// If the policy is called then there is still something to change, thus this is
// not a stable point.
let policy = |_| {
stable = false;
Action::Apply
};
match analyzer {
AnalyzerKind::Arithmetic => {
let mut arithmetic = Arithmetic::new();
arithmetic.analyze(rfn, Some(policy));
}
AnalyzerKind::Combiner => {
let mut combiner = Combiner::new();
combiner.analyze(rfn, Some(policy));
}
AnalyzerKind::CopyPropagation => {
let mut copy_propagation = CopyPropagation::new();
copy_propagation.analyze(rfn, Some(policy));
}
AnalyzerKind::CSE => {
let mut cse = CSE::new();
cse.analyze(rfn, Some(policy));
}
AnalyzerKind::DCE => {
let mut dce = DCE::new();
dce.analyze(rfn, Some(policy));
}
AnalyzerKind::SCCP => {
let mut sccp = SCCP::new();
sccp.analyze(rfn, Some(policy));
}
_ => (),
}
}
if stable {
break;
}
}
None
}
}
| 33.529703 | 100 | 0.545549 |
ac1bd6076c949baf12c50034c4c7bc6467fad03c
| 22 |
pub mod load_examples;
| 22 | 22 | 0.863636 |
8a2602e9bbf45a4a3afcf1d040c8c4cfd6139928
| 5,696 |
use std::process::{Command, ExitStatus, Stdio};
use std::str::FromStr;
use std::path::Path;
use serde::{de, Deserializer, Deserialize};
use serde_json;
/// Raw tag map as reported by `ffprobe`; field presence varies by container
/// and source (id3 vs. audible formats), hence the many `Option`s.
#[derive(Deserialize, Debug)]
struct MediaTags {
    major_brand: Option<String>, // you can recognize aax files with this one
    // one of these is the author
    artist: Option<String>, // first choice as it is the standard
    author: Option<String>, // this one is mostly audible specific
    // one of these is the book name
    album: Option<String>, // first choice
    parent_title: Option<String>, // second choice, if available at all
    title: String, // third choice, should be available always
    // one of these is a short description
    comment: Option<String>, // id3 tag standard
    description: Option<String>, // used in audible formats
    // one of these is kind of a publication date
    date: Option<String>, // used in aax, probably only year
    pub_date_start: Option<String>, // used in older audible format
    // narrator/speaker
    narrator: Option<String>, // probably only used in older audible format
}
/// The `format` object of `ffprobe`'s JSON output; numeric fields arrive as
/// JSON strings, hence the custom `deserialize_with` converters below.
#[derive(Deserialize, Debug)]
struct MediaFormat {
    format_name: String,
    #[serde(deserialize_with = "f64_from_str")]
    duration: f64,
    #[serde(deserialize_with = "usize_from_str")]
    size: usize,
    #[serde(deserialize_with = "u32_from_str")]
    bit_rate: u32,
    tags: MediaTags,
}
/// Top-level wrapper matching `ffprobe -show_format` output (`{"format": ...}`).
#[derive(Deserialize, Debug)]
struct MediaFormatContainer {
    format: MediaFormat,
}
/// Normalized metadata for a media file, derived from the raw ffprobe tags
/// by `parse_format`.
#[derive(Debug)]
pub struct MediaInfo {
    pub format: String,
    pub author: String,
    pub title: String,
    pub description: Option<String>,
    pub date: Option<String>,
    pub narrator: String,
    pub duration: i32, // seconds (truncated from ffprobe's float)
    pub size: i32,     // bytes
    pub bit_rate: i32, // bits per second
}
/// Identifies a media file by running `ffprobe` and decoding its JSON
/// `format` section into a [`MediaInfo`].
///
/// Panics if the `ffprobe` binary cannot be spawned; returns `Err` when the
/// probe output cannot be deserialized.
pub fn identify(filename: &Path) -> Result<MediaInfo, serde_json::Error> {
    let probe = Command::new("ffprobe")
        .args(&["-print_format", "json", "-show_format"])
        .arg(filename)
        .output()
        .expect("running ffprobe");
    let json = String::from_utf8_lossy(&probe.stdout).into_owned();
    serde_json::from_str::<MediaFormatContainer>(&json)
        .map(|container| parse_format(container.format))
}
/// Extracts the embedded cover image from `filename` into `output` by
/// invoking `ffmpeg` (output format inferred from the output extension).
pub fn extract_coverart(filename: &Path, output: &Path) -> ExitStatus {
    Command::new("ffmpeg")
        .args(&["-v", "0", "-i"])
        .arg(filename)
        .arg(output)
        .stdin(Stdio::null())
        .stdout(Stdio::null())
        .status()
        .expect("running ffmpeg")
}
/// Decode `aax` audible audio book
///
/// Uses `ffmpeg -activation_bytes` with the account-specific `magic_bytes`
/// and copies the audio stream (`-c:a copy`) into `output` without
/// re-encoding; video streams are dropped (`-vn`).
pub fn de_aax(filename: &Path, output: &Path, magic_bytes: &str) -> ExitStatus {
    Command::new("ffmpeg")
        .args(&["-v", "1", "-activation_bytes", magic_bytes, "-i"])
        .arg(filename)
        .args(&["-vn", "-c:a", "copy"])
        .arg(output)
        .stdin(Stdio::null())
        .stdout(Stdio::null())
        .status()
        .expect("running ffmpeg")
}
/// Re-encodes `filename` into `output` with the given ffmpeg audio `encoder`,
/// dropping any video streams (`-vn`) — a podcast-like audio-only file.
pub fn convert(filename: &Path, output: &Path, encoder: &str) -> ExitStatus {
    Command::new("ffmpeg")
        .args(&["-v", "1", "-i"])
        .arg(filename)
        .args(&["-vn", "-c:a", encoder])
        .arg(output)
        .stdin(Stdio::null())
        .stdout(Stdio::null())
        .status()
        .expect("running ffmpeg")
}
/// Normalizes raw ffprobe output into a [`MediaInfo`].
///
/// Fallback rules (first match wins, identical to the original if/overwrite
/// chains):
/// * format: for mp4 containers the `major_brand` tag (default "mp4"),
///   otherwise the raw ffprobe format name
/// * author: `artist`, then `author`, then "unknown"
/// * title: `album`, then `parent_title`, then `title`
/// * description: `comment` (id3 standard), then `description`
/// * date: `date` (aax), then `pub_date_start` (older audible)
/// * narrator: `narrator`, then "unknown"
fn parse_format(format: MediaFormat) -> MediaInfo {
    let tags = format.tags;
    let format_result = if format.format_name.contains("mp4") {
        // The brand tag distinguishes e.g. aax from plain mp4 containers.
        tags.major_brand
            .unwrap_or_else(|| String::from("mp4"))
            .trim()
            .to_string()
    } else {
        format.format_name
    };
    MediaInfo {
        format: format_result,
        // `unwrap_or_else` avoids allocating the fallback string eagerly.
        author: tags
            .artist
            .or(tags.author)
            .unwrap_or_else(|| String::from("unknown")),
        title: tags.album.or(tags.parent_title).unwrap_or(tags.title),
        description: tags.comment.or(tags.description),
        date: tags.date.or(tags.pub_date_start),
        narrator: tags.narrator.unwrap_or_else(|| String::from("unknown")),
        duration: format.duration as i32,
        size: format.size as i32,
        bit_rate: format.bit_rate as i32,
    }
}
/// Convert string to f64 while deserializing
fn f64_from_str<'de, D>(deserializer: D) -> Result<f64, D::Error>
where D: Deserializer<'de>
{
let s = String::deserialize(deserializer)?;
f64::from_str(&s).map_err(de::Error::custom)
}
/// Convert string to usize while deserializing
fn usize_from_str<'de, D>(deserializer: D) -> Result<usize, D::Error>
where D: Deserializer<'de>
{
let s = String::deserialize(deserializer)?;
usize::from_str(&s).map_err(de::Error::custom)
}
/// Convert string to u32 while deserializing
fn u32_from_str<'de, D>(deserializer: D) -> Result<u32, D::Error>
where D: Deserializer<'de>
{
let s = String::deserialize(deserializer)?;
u32::from_str(&s).map_err(de::Error::custom)
}
| 26.867925 | 85 | 0.596559 |
2117ff88bcc669171de9add1a73a638d9c5b7e85
| 1,602 |
//! See [the book](https://imdl.io/book/bittorrent/piece-length-selection.html)
//! for more information on Intermodal's automatic piece length selection
//! algorithm.
use crate::common::*;
/// Stateless namespace for piece-length heuristics.
pub(crate) struct PieceLengthPicker;
impl PieceLengthPicker {
  /// Picks a piece length of roughly sqrt(content_size) * 16 (a power of
  /// two), clamped to the range [16 KiB, 16 MiB].
  pub(crate) fn from_content_size(content_size: Bytes) -> Bytes {
    #![allow(
      clippy::as_conversions,
      clippy::cast_sign_loss,
      clippy::cast_precision_loss,
      clippy::cast_possible_truncation
    )]
    // `max(1)` guards the log2 of a zero-byte content size.
    let exponent = (content_size.count().max(1) as f64).log2().ceil() as u64;
    Bytes::from(1u64 << (exponent / 2 + 4))
      .max(Bytes::kib() * 16)
      .min(Bytes::mib() * 16)
  }
  /// Number of pieces needed to cover `content_size`: zero only for empty
  /// content, otherwise at least one.
  pub(crate) fn piece_count(content_size: Bytes, piece_length: Bytes) -> u64 {
    if content_size == Bytes::from(0u64) {
      0
    } else {
      (content_size / piece_length).max(1)
    }
  }
  /// Size of the metainfo piece-hash list: one SHA-1 digest per piece.
  pub(crate) fn metainfo_size(content_size: Bytes, piece_length: Bytes) -> Bytes {
    let digest_length: u64 = sha1::DIGEST_LENGTH.into_u64();
    Bytes::from(Self::piece_count(content_size, piece_length) * digest_length)
  }
}
#[cfg(test)]
mod tests {
  use super::*;
  // Checks that the clamp kicks in at the low end and that the piece length
  // grows with content size.
  #[test]
  fn limits() {
    assert_eq!(
      PieceLengthPicker::from_content_size(Bytes::mib() * 2),
      Bytes::kib() * 16
    );
    assert_eq!(
      PieceLengthPicker::from_content_size(Bytes::mib() * 4),
      Bytes::kib() * 32
    );
    assert_eq!(
      PieceLengthPicker::from_content_size(Bytes::mib() * 8),
      Bytes::kib() * 32
    );
    assert_eq!(
      PieceLengthPicker::from_content_size(Bytes::mib() * 16),
      Bytes::kib() * 64
    );
  }
}
| 26.262295 | 82 | 0.631086 |
4babb1c677b6c0bac4d1c5112533c1b8b338ba74
| 3,389 |
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use cosmwasm_bignumber::Uint256;
use cosmwasm_std::{Api, CanonicalAddr, Extern, Order, Querier, StdResult, Storage};
use cosmwasm_storage::{Bucket, ReadonlyBucket, ReadonlySingleton, Singleton};
use moneymarket::custody::{BAssetInfo, BorrowerResponse};
// Storage keys: singleton config and the per-borrower bucket namespace.
const KEY_CONFIG: &[u8] = b"config";
const PREFIX_BORROWER: &[u8] = b"borrower";
/// Contract configuration stored as a singleton under `KEY_CONFIG`.
/// All addresses are kept in canonical form.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)]
pub struct Config {
    pub owner: CanonicalAddr,
    pub collateral_token: CanonicalAddr,
    pub overseer_contract: CanonicalAddr,
    pub market_contract: CanonicalAddr,
    pub reward_contract: CanonicalAddr,
    pub liquidation_contract: CanonicalAddr,
    pub stable_denom: String,
    pub basset_info: BAssetInfo,
}
/// Per-borrower collateral record stored in the `PREFIX_BORROWER` bucket,
/// keyed by the borrower's canonical address.
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)]
pub struct BorrowerInfo {
    pub balance: Uint256,   // total deposited collateral
    pub spendable: Uint256, // portion not locked as collateral
}
/// Persists the singleton contract configuration.
pub fn store_config<S: Storage>(storage: &mut S, data: &Config) -> StdResult<()> {
    Singleton::new(storage, KEY_CONFIG).save(data)
}
/// Loads the singleton contract configuration; errors if it was never stored.
pub fn read_config<S: Storage>(storage: &S) -> StdResult<Config> {
    ReadonlySingleton::new(storage, KEY_CONFIG).load()
}
/// Persists `borrower_info` in the borrower bucket under the borrower's
/// canonical address.
pub fn store_borrower_info<S: Storage>(
    storage: &mut S,
    borrower: &CanonicalAddr,
    borrower_info: &BorrowerInfo,
) -> StdResult<()> {
    let mut borrower_bucket: Bucket<S, BorrowerInfo> = Bucket::new(PREFIX_BORROWER, storage);
    // `borrower_info` is already a reference (the original passed a redundant
    // `&borrower_info`), and `save` already yields `StdResult<()>`, so the
    // result is returned directly instead of `?; Ok(())`.
    borrower_bucket.save(borrower.as_slice(), borrower_info)
}
/// Deletes the stored record for `borrower`; a no-op if none exists.
pub fn remove_borrower_info<S: Storage>(storage: &mut S, borrower: &CanonicalAddr) {
    Bucket::<S, BorrowerInfo>::new(PREFIX_BORROWER, storage).remove(borrower.as_slice());
}
/// Loads the borrower's record, falling back to zeroed balances when the
/// borrower has no stored entry (or the stored entry fails to decode).
pub fn read_borrower_info<S: Storage>(storage: &S, borrower: &CanonicalAddr) -> BorrowerInfo {
    let borrower_bucket: ReadonlyBucket<S, BorrowerInfo> =
        ReadonlyBucket::new(PREFIX_BORROWER, storage);
    // `load` takes a byte slice; the original's extra `&` was a needless
    // double reference. `unwrap_or_else` replaces the manual match.
    borrower_bucket
        .load(borrower.as_slice())
        .unwrap_or_else(|_| BorrowerInfo {
            balance: Uint256::zero(),
            spendable: Uint256::zero(),
        })
}
// settings for pagination
const MAX_LIMIT: u32 = 30;
const DEFAULT_LIMIT: u32 = 10;
/// Returns a page of borrower records in ascending key order.
///
/// `start_after` is an exclusive lower bound (see `calc_range_start`);
/// `limit` defaults to `DEFAULT_LIMIT` and is capped at `MAX_LIMIT`.
/// Canonical addresses are converted to human-readable form via the API.
pub fn read_borrowers<S: Storage, A: Api, Q: Querier>(
    deps: &Extern<S, A, Q>,
    start_after: Option<CanonicalAddr>,
    limit: Option<u32>,
) -> StdResult<Vec<BorrowerResponse>> {
    let position_bucket: ReadonlyBucket<S, BorrowerInfo> =
        ReadonlyBucket::new(PREFIX_BORROWER, &deps.storage);
    let limit = limit.unwrap_or(DEFAULT_LIMIT).min(MAX_LIMIT) as usize;
    let start = calc_range_start(start_after);
    // `collect::<StdResult<Vec<_>>>` short-circuits on the first error.
    position_bucket
        .range(start.as_deref(), None, Order::Ascending)
        .take(limit)
        .map(|item| {
            let (k, v) = item?;
            let borrower: CanonicalAddr = CanonicalAddr::from(k);
            Ok(BorrowerResponse {
                borrower: deps.api.human_address(&borrower)?,
                balance: v.balance,
                spendable: v.spendable,
            })
        })
        .collect()
}
// Exclusive lower bound for range scans: the smallest key strictly greater
// than `start_after`, obtained by appending a single `1` byte to the address.
fn calc_range_start(start_after: Option<CanonicalAddr>) -> Option<Vec<u8>> {
    start_after.map(|addr| [addr.as_slice(), &[1u8][..]].concat())
}
| 32.902913 | 94 | 0.67483 |
872573a6104a3cb53df7300fb1088a50b8ceb66f
| 706 |
//! Touch Sensor
use super::SensorPort;
use crate::{Attribute, Device, Driver, Ev3Error, Ev3Result};
/// Touch Sensor
#[derive(Debug, Clone, Device)]
pub struct TouchSensor {
    driver: Driver, // sysfs attribute access for this device
}
impl TouchSensor {
    /// Wraps an already-resolved device driver.
    fn new(driver: Driver) -> Self {
        Self {
            driver,
        }
    }
    // Project macro: presumably generates the public constructors that locate
    // a "lego-ev3-touch" device on a given/any input port — confirm in the
    // macro definition.
    findable!(
        "lego-sensor",
        "lego-ev3-touch",
        SensorPort,
        "TouchSensor",
        "in"
    );
    /// Button state
    pub const MODE_TOUCH: &'static str = "TOUCH";
    // Project macro: presumably generates the shared sensor accessors
    // (e.g. `get_value0`) used below.
    sensor!();
    /// A boolean indicating whether the current touch sensor is being pressed.
    pub fn get_pressed_state(&self) -> Ev3Result<bool> {
        // Any non-zero raw value counts as "pressed".
        Ok(self.get_value0()? != 0)
    }
}
| 18.578947 | 79 | 0.572238 |
9134e00da9da73653d0e24c0a81d758b92627455
| 24,824 |
use crate::{
dns::Resolver,
event::{self, Event},
region::RegionOrEndpoint,
serde::to_string,
sinks::util::{
encoding::{EncodingConfigWithDefault, EncodingConfiguration},
retries::RetryLogic,
rusoto, BatchBytesConfig, Buffer, PartitionBuffer, PartitionInnerBuffer, ServiceBuilderExt,
SinkExt, TowerRequestConfig,
},
template::Template,
topology::config::{DataType, SinkConfig, SinkContext, SinkDescription},
};
use bytes::Bytes;
use chrono::Utc;
use futures01::{stream::iter_ok, Future, Poll, Sink};
use lazy_static::lazy_static;
use rusoto_core::{Region, RusotoError, RusotoFuture};
use rusoto_s3::{
HeadBucketRequest, PutObjectError, PutObjectOutput, PutObjectRequest, S3Client, S3,
};
use serde::{Deserialize, Serialize};
use snafu::Snafu;
use std::collections::BTreeMap;
use std::convert::TryInto;
use tower::{Service, ServiceBuilder};
use tracing::field;
use tracing_futures::{Instrument, Instrumented};
use uuid::Uuid;
/// Thin wrapper around the Rusoto S3 client; cloned per request by Tower.
#[derive(Clone)]
pub struct S3Sink {
    client: S3Client,
}
/// User-facing configuration for the `aws_s3` sink.
#[derive(Deserialize, Serialize, Debug, Default)]
#[serde(deny_unknown_fields)]
pub struct S3SinkConfig {
    pub bucket: String,
    // Templated object key prefix; defaults to "date=%F/" when unset.
    pub key_prefix: Option<String>,
    // strftime pattern embedded in object names; defaults to "%s" when unset.
    pub filename_time_format: Option<String>,
    // Append a UUID to object names; defaults to true when unset.
    pub filename_append_uuid: Option<bool>,
    pub filename_extension: Option<String>,
    #[serde(flatten)]
    options: S3Options,
    #[serde(flatten)]
    pub region: RegionOrEndpoint,
    #[serde(
        skip_serializing_if = "crate::serde::skip_serializing_if_default",
        default
    )]
    pub encoding: EncodingConfigWithDefault<Encoding>,
    pub compression: Compression,
    #[serde(default)]
    pub batch: BatchBytesConfig,
    #[serde(default)]
    pub request: TowerRequestConfig,
    pub assume_role: Option<String>,
}
/// Optional S3 object settings forwarded verbatim to `PutObjectRequest`.
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
struct S3Options {
    acl: Option<S3CannedAcl>,
    grant_full_control: Option<String>,
    grant_read: Option<String>,
    grant_read_acp: Option<String>,
    grant_write_acp: Option<String>,
    server_side_encryption: Option<S3ServerSideEncryption>,
    ssekms_key_id: Option<String>,
    storage_class: Option<S3StorageClass>,
    tags: Option<BTreeMap<String, String>>,
}
/// S3 canned ACL values, serialized in kebab-case (e.g. "public-read").
#[derive(Clone, Copy, Debug, Derivative, Deserialize, Serialize)]
#[derivative(Default)]
#[serde(rename_all = "kebab-case")]
enum S3CannedAcl {
    #[derivative(Default)]
    Private,
    PublicRead,
    PublicReadWrite,
    AwsExecRead,
    AuthenticatedRead,
    LogDeliveryWrite,
}
/// Server-side encryption modes, using S3's literal header values.
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
enum S3ServerSideEncryption {
    #[serde(rename = "AES256")]
    AES256,
    #[serde(rename = "aws:kms")]
    AwsKms,
}
/// S3 storage classes, serialized in SCREAMING_SNAKE_CASE as S3 expects.
#[derive(Clone, Copy, Debug, Derivative, Deserialize, Serialize)]
#[derivative(Default)]
#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
enum S3StorageClass {
    #[derivative(Default)]
    Standard,
    ReducedRedundancy,
    IntelligentTiering,
    StandardIA,
    OnezoneIA,
    Glacier,
    DeepArchive,
}
// Sink-specific defaults layered under any user-provided request settings.
lazy_static! {
    static ref REQUEST_DEFAULTS: TowerRequestConfig = TowerRequestConfig {
        in_flight_limit: Some(50),
        rate_limit_num: Some(250),
        ..Default::default()
    };
}
/// Per-event encoding of the object payload.
#[derive(Deserialize, Serialize, Debug, Eq, PartialEq, Clone, Derivative)]
#[serde(rename_all = "snake_case")]
#[derivative(Default)]
pub enum Encoding {
    #[derivative(Default)]
    Text,
    Ndjson,
}
/// Object compression; gzip by default.
#[derive(Deserialize, Serialize, Debug, Clone, Derivative)]
#[serde(rename_all = "snake_case")]
#[derivative(Default)]
pub enum Compression {
    #[derivative(Default)]
    Gzip,
    None,
}
// Registers this sink under the "aws_s3" name for config discovery.
inventory::submit! {
    SinkDescription::new::<S3SinkConfig>("aws_s3")
}
#[typetag::serde(name = "aws_s3")]
impl SinkConfig for S3SinkConfig {
    /// Builds the sink together with its bucket-existence healthcheck.
    fn build(&self, cx: SinkContext) -> crate::Result<(super::RouterSink, super::Healthcheck)> {
        let healthcheck = S3Sink::healthcheck(self, cx.resolver())?;
        let sink = S3Sink::new(self, cx)?;
        Ok((sink, healthcheck))
    }
    // This sink consumes log events only.
    fn input_type(&self) -> DataType {
        DataType::Log
    }
    fn sink_type(&self) -> &'static str {
        "aws_s3"
    }
}
/// Healthcheck failures mapped from the `HeadBucket` response.
#[derive(Debug, Snafu)]
enum HealthcheckError {
    #[snafu(display("Invalid credentials"))]
    InvalidCredentials,
    #[snafu(display("Unknown bucket: {:?}", bucket))]
    UnknownBucket { bucket: String },
    #[snafu(display("Unknown status code: {}", status))]
    UnknownStatus { status: http::StatusCode },
}
impl S3Sink {
    /// Assembles the full sink: encoding, partitioned batching by key prefix,
    /// Tower retry/rate-limit middleware, and the S3 PutObject service.
    pub fn new(config: &S3SinkConfig, cx: SinkContext) -> crate::Result<super::RouterSink> {
        let request = config.request.unwrap_with(&REQUEST_DEFAULTS);
        let encoding = config.encoding.clone();
        let compression = match config.compression {
            Compression::Gzip => true,
            Compression::None => false,
        };
        // Defaults: "%s" timestamp names, UUID suffix on, 10 MiB / 300 s batches.
        let filename_time_format = config.filename_time_format.clone().unwrap_or("%s".into());
        let filename_append_uuid = config.filename_append_uuid.unwrap_or(true);
        let batch = config.batch.unwrap_or(bytesize::mib(10u64), 300);
        let key_prefix = if let Some(kp) = &config.key_prefix {
            Template::from(kp.as_str())
        } else {
            Template::from("date=%F/")
        };
        let region = config.region.clone().try_into()?;
        let s3 = S3Sink {
            client: Self::create_client(region, config.assume_role.clone(), cx.resolver())?,
        };
        let filename_extension = config.filename_extension.clone();
        let bucket = config.bucket.clone();
        let options = config.options.clone();
        // Each flushed batch is turned into a concrete PutObject request here.
        let svc = ServiceBuilder::new()
            .map(move |req| {
                build_request(
                    req,
                    filename_time_format.clone(),
                    filename_extension.clone(),
                    filename_append_uuid,
                    compression,
                    bucket.clone(),
                    options.clone(),
                )
            })
            .settings(request, S3RetryLogic)
            .service(s3);
        // Events are encoded, partitioned by rendered key prefix, and batched.
        let sink = crate::sinks::util::BatchServiceSink::new(svc, cx.acker())
            .partitioned_batched_with_min(PartitionBuffer::new(Buffer::new(compression)), &batch)
            .with_flat_map(move |e| iter_ok(encode_event(e, &key_prefix, &encoding)));
        Ok(Box::new(sink))
    }
    /// Verifies bucket access with a `HeadBucket` call, mapping 403/404 to
    /// specific [`HealthcheckError`] variants.
    pub fn healthcheck(
        config: &S3SinkConfig,
        resolver: Resolver,
    ) -> crate::Result<super::Healthcheck> {
        let client = Self::create_client(
            config.region.clone().try_into()?,
            config.assume_role.clone(),
            resolver,
        )?;
        let request = HeadBucketRequest {
            bucket: config.bucket.clone(),
        };
        let response = client.head_bucket(request);
        let bucket = config.bucket.clone();
        let healthcheck = response.map_err(|err| match err {
            RusotoError::Unknown(response) => match response.status {
                http::status::StatusCode::FORBIDDEN => HealthcheckError::InvalidCredentials.into(),
                http::status::StatusCode::NOT_FOUND => {
                    HealthcheckError::UnknownBucket { bucket }.into()
                }
                status => HealthcheckError::UnknownStatus { status }.into(),
            },
            err => err.into(),
        });
        Ok(Box::new(healthcheck))
    }
    /// Builds the S3 client for the given region; tests use fixed static
    /// credentials because rusoto does not pick up runtime env vars there.
    pub fn create_client(
        region: Region,
        _assume_role: Option<String>,
        resolver: Resolver,
    ) -> crate::Result<S3Client> {
        let client = rusoto::client(resolver)?;
        #[cfg(not(test))]
        let creds = rusoto::AwsCredentialsProvider::new(&region, _assume_role)?;
        // Hack around the fact that rusoto will not pick up runtime
        // env vars. This is designed to only for test purposes use
        // static credentials.
        #[cfg(test)]
        let creds =
            rusoto::AwsCredentialsProvider::new_minimal("test-access-key", "test-secret-key");
        Ok(S3Client::new_with(client, creds, region))
    }
}
impl Service<Request> for S3Sink {
    type Response = PutObjectOutput;
    type Error = RusotoError<PutObjectError>;
    type Future = Instrumented<RusotoFuture<PutObjectOutput, PutObjectError>>;

    // The rusoto client imposes no backpressure of its own, so the service
    // is always ready to accept another request.
    fn poll_ready(&mut self) -> Poll<(), Self::Error> {
        Ok(().into())
    }

    /// Submits one `PutObject` call, translating the per-request
    /// `S3Options` (tags, ACLs, encryption, storage class) into the
    /// corresponding rusoto request fields.
    fn call(&mut self, request: Request) -> Self::Future {
        let options = request.options;
        // S3 expects object tags encoded as a URL query string.
        let mut tagging = url::form_urlencoded::Serializer::new(String::new());
        if let Some(tags) = options.tags {
            for (p, v) in tags {
                tagging.append_pair(&p, &v);
            }
        }
        let tagging = tagging.finish();
        self.client
            .put_object(PutObjectRequest {
                body: Some(request.body.into()),
                bucket: request.bucket,
                key: request.key,
                content_encoding: request.content_encoding,
                acl: options.acl.map(to_string),
                grant_full_control: options.grant_full_control,
                grant_read: options.grant_read,
                grant_read_acp: options.grant_read_acp,
                grant_write_acp: options.grant_write_acp,
                server_side_encryption: options.server_side_encryption.map(to_string),
                ssekms_key_id: options.ssekms_key_id,
                storage_class: options.storage_class.map(to_string),
                tagging: Some(tagging),
                ..Default::default()
            })
            .instrument(info_span!("request"))
    }
}
/// Converts a partitioned batch of encoded bytes into an upload `Request`,
/// deriving the object key from the partition prefix, a formatted
/// timestamp (optionally suffixed with a UUID), and a file extension.
fn build_request(
    req: PartitionInnerBuffer<Vec<u8>, Bytes>,
    time_format: String,
    extension: Option<String>,
    uuid: bool,
    gzip: bool,
    bucket: String,
    options: S3Options,
) -> Request {
    let (body, prefix_bytes) = req.into_parts();

    // TODO: pull the seconds from the last event
    let timestamp = Utc::now().format(&time_format);
    let filename = if uuid {
        format!("{}-{}", timestamp, Uuid::new_v4().to_hyphenated())
    } else {
        timestamp.to_string()
    };

    // When no extension is configured, fall back to one matching the
    // compression mode.
    let ext = match extension {
        Some(e) => e,
        None if gzip => "log.gz".into(),
        None => "log".into(),
    };

    let prefix = String::from_utf8_lossy(&prefix_bytes[..]).into_owned();
    let key = format!("{}{}.{}", prefix, filename, ext);

    debug!(
        message = "sending events.",
        bytes = &field::debug(body.len()),
        bucket = &field::debug(&bucket),
        key = &field::debug(&key)
    );

    let content_encoding = if gzip { Some("gzip".to_string()) } else { None };
    Request {
        body,
        bucket,
        key,
        content_encoding,
        options,
    }
}
// A fully-assembled S3 upload, produced by `build_request` and consumed by
// the `Service` impl on `S3Sink`.
#[derive(Debug, Clone)]
struct Request {
    // Encoded (and possibly gzipped) batch payload.
    body: Vec<u8>,
    // Destination bucket name.
    bucket: String,
    // Full object key: partition prefix + timestamp(+uuid) + extension.
    key: String,
    // `Some("gzip")` when the body is compressed, `None` otherwise.
    content_encoding: Option<String>,
    // Per-request S3 settings (ACLs, tags, encryption, storage class, ...).
    options: S3Options,
}
#[derive(Debug, Clone)]
struct S3RetryLogic;
impl RetryLogic for S3RetryLogic {
type Error = RusotoError<PutObjectError>;
type Response = PutObjectOutput;
fn is_retriable_error(&self, error: &Self::Error) -> bool {
match error {
RusotoError::HttpDispatch(_) => true,
RusotoError::Unknown(res) if res.status.is_server_error() => true,
_ => false,
}
}
}
/// Applies the configured encoding rules to `event`, renders its partition
/// key from `key_prefix`, and serializes it with the configured codec.
///
/// Returns `None` (dropping the event, with a rate-limited warning) when
/// the key template references fields missing from the event.
fn encode_event(
    mut event: Event,
    key_prefix: &Template,
    encoding: &EncodingConfigWithDefault<Encoding>,
) -> Option<PartitionInnerBuffer<Vec<u8>, Bytes>> {
    encoding.apply_rules(&mut event);

    let key = match key_prefix.render_string(&event) {
        Ok(rendered) => rendered,
        Err(missing_keys) => {
            warn!(
                message = "Keys do not exist on the event. Dropping event.",
                ?missing_keys,
                rate_limit_secs = 30,
            );
            return None;
        }
    };

    let log = event.into_log();
    let bytes = match encoding.codec {
        // One JSON object per line.
        Encoding::Ndjson => {
            let mut buf =
                serde_json::to_vec(&log).expect("Failed to encode event as json, this is a bug!");
            buf.push(b'\n');
            buf
        }
        // Raw message field only, newline-terminated.
        Encoding::Text => {
            let mut buf = log
                .get(&event::log_schema().message_key())
                .map(|v| v.as_bytes().to_vec())
                .unwrap_or_default();
            buf.push(b'\n');
            buf
        }
    };

    Some(PartitionInnerBuffer::new(bytes, key.into()))
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::event::{self, Event};
    use std::collections::BTreeMap;

    // Text codec: output is the raw message plus a trailing newline.
    #[test]
    fn s3_encode_event_text() {
        let message = "hello world".to_string();
        let batch_time_format = Template::from("date=%F");
        let bytes = encode_event(
            message.clone().into(),
            &batch_time_format,
            &Encoding::Text.into(),
        )
        .unwrap();
        let encoded_message = message + "\n";
        let (bytes, _) = bytes.into_parts();
        assert_eq!(&bytes[..], encoded_message.as_bytes());
    }

    // Ndjson codec: the whole log map serialized as one JSON object.
    #[test]
    fn s3_encode_event_ndjson() {
        let message = "hello world".to_string();
        let mut event = Event::from(message.clone());
        event.as_mut_log().insert("key", "value");
        let batch_time_format = Template::from("date=%F");
        let bytes = encode_event(event, &batch_time_format, &Encoding::Ndjson.into()).unwrap();
        let (bytes, _) = bytes.into_parts();
        let map: BTreeMap<String, String> = serde_json::from_slice(&bytes[..]).unwrap();
        assert_eq!(map[&event::log_schema().message_key().to_string()], message);
        assert_eq!(map["key"], "value".to_string());
    }

    // Object-key construction: explicit extension, the `.log`/`.log.gz`
    // fallback, and the UUID suffix behavior.
    #[test]
    fn s3_build_request() {
        let buf = PartitionInnerBuffer::new(vec![0u8; 10], Bytes::from("key/"));
        let req = build_request(
            buf.clone(),
            "date".into(),
            Some("ext".into()),
            false,
            false,
            "bucket".into(),
            S3Options::default(),
        );
        assert_eq!(req.key, "key/date.ext".to_string());
        let req = build_request(
            buf.clone(),
            "date".into(),
            None,
            false,
            false,
            "bucket".into(),
            S3Options::default(),
        );
        assert_eq!(req.key, "key/date.log".to_string());
        let req = build_request(
            buf.clone(),
            "date".into(),
            None,
            false,
            true,
            "bucket".into(),
            S3Options::default(),
        );
        assert_eq!(req.key, "key/date.log.gz".to_string());
        let req = build_request(
            buf.clone(),
            "date".into(),
            None,
            true,
            true,
            "bucket".into(),
            S3Options::default(),
        );
        // With a random UUID appended the key is no longer predictable.
        assert_ne!(req.key, "key/date.log.gz".to_string());
    }
}
// End-to-end tests against a local minio instance at localhost:9000 using
// static test credentials (see `client()` below).
#[cfg(feature = "s3-integration-tests")]
#[cfg(test)]
mod integration_tests {
    use super::*;
    use crate::{
        assert_downcast_matches,
        dns::Resolver,
        event::Event,
        region::RegionOrEndpoint,
        runtime::Runtime,
        sinks::aws_s3::{S3Sink, S3SinkConfig},
        test_util::{random_lines_with_stream, random_string, runtime},
        topology::config::SinkContext,
    };
    use flate2::read::GzDecoder;
    use futures01::{Future, Sink};
    use pretty_assertions::assert_eq;
    use rusoto_core::region::Region;
    use rusoto_s3::{S3Client, S3};
    use std::io::{BufRead, BufReader};

    const BUCKET: &str = "router-tests";

    // Round-trip: everything fits in one batch, so exactly one object is
    // written containing the input lines verbatim.
    #[test]
    fn s3_insert_message_into() {
        let mut rt = runtime();
        let cx = SinkContext::new_test(rt.executor());
        let config = config(1000000);
        let prefix = config.key_prefix.clone();
        let sink = S3Sink::new(&config, cx).unwrap();
        let (lines, events) = random_lines_with_stream(100, 10);
        let pump = sink.send_all(events);
        let _ = rt.block_on(pump).unwrap();
        let keys = get_keys(prefix.unwrap());
        assert_eq!(keys.len(), 1);
        let key = keys[0].clone();
        assert!(key.ends_with(".log"));
        let obj = get_object(key);
        assert_eq!(obj.content_encoding, None);
        let response_lines = get_lines(obj);
        assert_eq!(lines, response_lines);
    }

    // Partitioning on the templated `i` field yields one object per
    // partition, each preserving its input order.
    #[test]
    fn s3_rotate_files_after_the_buffer_size_is_reached() {
        let mut rt = runtime();
        let cx = SinkContext::new_test(rt.executor());
        ensure_bucket(&client());
        let config = S3SinkConfig {
            key_prefix: Some(format!("{}/{}", random_string(10), "{{i}}")),
            filename_time_format: Some("waitsforfullbatch".into()),
            filename_append_uuid: Some(false),
            ..config(1000)
        };
        let prefix = config.key_prefix.clone();
        let sink = S3Sink::new(&config, cx).unwrap();
        let (lines, _events) = random_lines_with_stream(100, 30);
        // Tag lines 0-9 with i=1, 10-19 with i=2, 20-29 with i=3.
        let events = lines.clone().into_iter().enumerate().map(|(i, line)| {
            let mut e = Event::from(line);
            let i = if i < 10 {
                1
            } else if i < 20 {
                2
            } else {
                3
            };
            e.as_mut_log().insert("i", format!("{}", i));
            e
        });
        let pump = sink.send_all(futures01::stream::iter_ok(events));
        let _ = rt.block_on(pump).unwrap();
        let keys = get_keys(prefix.unwrap());
        assert_eq!(keys.len(), 3);
        let response_lines = keys
            .into_iter()
            .map(|key| get_lines(get_object(key)))
            .collect::<Vec<_>>();
        assert_eq!(&lines[00..10], response_lines[0].as_slice());
        assert_eq!(&lines[10..20], response_lines[1].as_slice());
        assert_eq!(&lines[20..30], response_lines[2].as_slice());
    }

    // Feeds events in two bursts separated by a pause; the batch timeout
    // must flush partial batches so all three partitions still appear.
    #[test]
    fn s3_waits_for_full_batch_or_timeout_before_sending() {
        let rt = runtime();
        let cx = SinkContext::new_test(rt.executor());
        ensure_bucket(&client());
        let config = S3SinkConfig {
            key_prefix: Some(format!("{}/{}", random_string(10), "{{i}}")),
            filename_time_format: Some("waitsforfullbatch".into()),
            filename_append_uuid: Some(false),
            ..config(1000)
        };
        let prefix = config.key_prefix.clone();
        let sink = S3Sink::new(&config, cx).unwrap();
        let (lines, _) = random_lines_with_stream(100, 30);
        let (tx, rx) = futures01::sync::mpsc::channel(1);
        let pump = sink.send_all(rx).map(|_| ()).map_err(|_| ());
        let mut rt = Runtime::new().unwrap();
        rt.spawn(pump);
        let mut tx = tx.wait();
        // First burst: 15 events (partitions 1 and 2).
        for (i, line) in lines.iter().enumerate().take(15) {
            let mut event = Event::from(line.as_str());
            let i = if i < 10 { 1 } else { 2 };
            event.as_mut_log().insert("i", format!("{}", i));
            tx.send(event).unwrap();
        }
        std::thread::sleep(std::time::Duration::from_millis(100));
        // Second burst: remaining 15 events (partitions 2 and 3).
        for (i, line) in lines.iter().skip(15).enumerate() {
            let mut event = Event::from(line.as_str());
            let i = if i < 5 { 2 } else { 3 };
            event.as_mut_log().insert("i", format!("{}", i));
            tx.send(event).unwrap();
        }
        drop(tx);
        crate::test_util::shutdown_on_idle(rt);
        let keys = get_keys(prefix.unwrap());
        assert_eq!(keys.len(), 3);
        let response_lines = keys
            .into_iter()
            .map(|key| get_lines(get_object(key)))
            .collect::<Vec<_>>();
        assert_eq!(&lines[00..10], response_lines[0].as_slice());
        assert_eq!(&lines[10..20], response_lines[1].as_slice());
        assert_eq!(&lines[20..30], response_lines[2].as_slice());
    }

    // Gzip mode: objects get the `.log.gz` suffix, a gzip content
    // encoding, and decompress back to the original lines.
    #[test]
    fn s3_gzip() {
        let mut rt = runtime();
        let cx = SinkContext::new_test(rt.executor());
        ensure_bucket(&client());
        let config = S3SinkConfig {
            compression: Compression::Gzip,
            filename_time_format: Some("%S%f".into()),
            ..config(1000)
        };
        let prefix = config.key_prefix.clone();
        let sink = S3Sink::new(&config, cx).unwrap();
        let (lines, events) = random_lines_with_stream(100, 500);
        let pump = sink.send_all(events);
        let _ = rt.block_on(pump).unwrap();
        let keys = get_keys(prefix.unwrap());
        assert_eq!(keys.len(), 2);
        let response_lines = keys
            .into_iter()
            .map(|key| {
                assert!(key.ends_with(".log.gz"));
                let obj = get_object(key);
                assert_eq!(obj.content_encoding, Some("gzip".to_string()));
                get_gzipped_lines(obj)
            })
            .flatten()
            .collect::<Vec<_>>();
        assert_eq!(lines, response_lines);
    }

    #[test]
    fn s3_healthchecks() {
        let mut rt = Runtime::new().unwrap();
        let resolver = Resolver::new(Vec::new(), rt.executor()).unwrap();
        let healthcheck = S3Sink::healthcheck(&config(1), resolver).unwrap();
        rt.block_on(healthcheck).unwrap();
    }

    #[test]
    fn s3_healthchecks_invalid_bucket() {
        let mut rt = Runtime::new().unwrap();
        let resolver = Resolver::new(Vec::new(), rt.executor()).unwrap();
        let config = S3SinkConfig {
            bucket: "asdflkjadskdaadsfadf".to_string(),
            ..config(1)
        };
        let healthcheck = S3Sink::healthcheck(&config, resolver).unwrap();
        assert_downcast_matches!(
            rt.block_on(healthcheck).unwrap_err(),
            HealthcheckError,
            HealthcheckError::UnknownBucket{ .. }
        );
    }

    // Raw S3 client pointed at the local minio endpoint.
    fn client() -> S3Client {
        let region = Region::Custom {
            name: "minio".to_owned(),
            endpoint: "http://localhost:9000".to_owned(),
        };
        use rusoto_core::HttpClient;
        use rusoto_credential::StaticProvider;
        let p = StaticProvider::new_minimal("test-access-key".into(), "test-secret-key".into());
        let d = HttpClient::new().unwrap();
        S3Client::new_with(d, p, region)
    }

    // Baseline sink config (random key prefix, 5 second batch timeout)
    // with the given maximum batch size in bytes.
    fn config(batch_size: usize) -> S3SinkConfig {
        ensure_bucket(&client());
        S3SinkConfig {
            key_prefix: Some(random_string(10) + "/date=%F/"),
            bucket: BUCKET.to_string(),
            compression: Compression::None,
            batch: BatchBytesConfig {
                max_size: Some(batch_size),
                timeout_secs: Some(5),
            },
            region: RegionOrEndpoint::with_endpoint("http://localhost:9000".to_owned()),
            ..Default::default()
        }
    }

    // Idempotently creates the shared test bucket.
    fn ensure_bucket(client: &S3Client) {
        use rusoto_s3::{CreateBucketError, CreateBucketRequest};
        let req = CreateBucketRequest {
            bucket: BUCKET.to_string(),
            ..Default::default()
        };
        let res = client.create_bucket(req);
        match res.sync() {
            Ok(_) | Err(RusotoError::Service(CreateBucketError::BucketAlreadyOwnedByYou(_))) => {}
            Err(e) => match e {
                RusotoError::Unknown(b) => {
                    let body = String::from_utf8_lossy(&b.body[..]);
                    panic!("Couldn't create bucket: {:?}; Body {}", b, body);
                }
                _ => panic!("Couldn't create bucket: {}", e),
            },
        }
    }

    // Lists object keys under the first path segment of `prefix`.
    fn get_keys(prefix: String) -> Vec<String> {
        let prefix = prefix.split("/").into_iter().next().unwrap().to_string();
        let list_res = client()
            .list_objects_v2(rusoto_s3::ListObjectsV2Request {
                bucket: BUCKET.to_string(),
                prefix: Some(prefix),
                ..Default::default()
            })
            .sync()
            .unwrap();
        list_res
            .contents
            .unwrap()
            .into_iter()
            .map(|obj| obj.key.unwrap())
            .collect()
    }

    fn get_object(key: String) -> rusoto_s3::GetObjectOutput {
        client()
            .get_object(rusoto_s3::GetObjectRequest {
                bucket: BUCKET.to_string(),
                key,
                ..Default::default()
            })
            .sync()
            .unwrap()
    }

    fn get_lines(obj: rusoto_s3::GetObjectOutput) -> Vec<String> {
        let buf_read = BufReader::new(obj.body.unwrap().into_blocking_read());
        buf_read.lines().map(|l| l.unwrap()).collect()
    }

    fn get_gzipped_lines(obj: rusoto_s3::GetObjectOutput) -> Vec<String> {
        let buf_read = BufReader::new(GzDecoder::new(obj.body.unwrap().into_blocking_read()));
        buf_read.lines().map(|l| l.unwrap()).collect()
    }
}
| 29.980676 | 99 | 0.566347 |
e5eacc288416bfb12f0af7e7b3b57aa5fbd6280b
| 784 |
use proptest::prop_assert_eq;
use proptest::test_runner::{Config, TestRunner};
use crate::erlang::is_bitstring_1::native;
use crate::test::strategy;
use crate::test::with_process_arc;
// Property: `is_bitstring/1` returns false for every non-bitstring term.
#[test]
fn without_bitstring_returns_false() {
    run!(
        |arc_process| strategy::term::is_not_bitstring(arc_process.clone()),
        |term| {
            prop_assert_eq!(native(term), false.into());
            Ok(())
        },
    );
}
// Property: `is_bitstring/1` returns true for every bitstring term.
#[test]
fn with_bitstring_returns_true() {
    with_process_arc(|arc_process| {
        TestRunner::new(Config::with_source_file(file!()))
            .run(&strategy::term::is_bitstring(arc_process.clone()), |term| {
                prop_assert_eq!(native(term), true.into());
                Ok(())
            })
            .unwrap();
    });
}
| 24.5 | 77 | 0.593112 |
fc17b9278192bc0b8a8f03266f67dd24d23cf8cb
| 104 |
use d3dx12::build::copy_data_file;
// Build script: stage the HLSL shader source via d3dx12's data-file helper
// so it is available alongside the build output.
fn main() {
    copy_data_file("src/hello-texture-shaders.hlsl");
}
| 17.333333 | 53 | 0.711538 |
33e89486d86dc23c2c04746143daca7798c22f24
| 1,114 |
/*
* Copyright 2019 Cargill Incorporated
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* -----------------------------------------------------------------------------
*/
pub(crate) mod models;
pub(crate) mod schema;
embed_migrations!("./src/biome/notifications/database/migrations");
use diesel::pg::PgConnection;
pub use crate::database::error::DatabaseError;
/// Runs the embedded Biome notification migrations against `conn`.
///
/// Any migration failure is surfaced as `DatabaseError::ConnectionError`.
pub fn run_migrations(conn: &PgConnection) -> Result<(), DatabaseError> {
    embedded_migrations::run(conn).map_err(|err| DatabaseError::ConnectionError(Box::new(err)))?;
    info!("Successfully applied migrations");
    Ok(())
}
| 32.764706 | 97 | 0.681329 |
09c5768f86392fed51e117eab44eff754ab6fcc8
| 11,032 |
// Generates a nightly-only benchmark module `$name` that measures how fast
// the header type `$ty` parses, and then formats, the raw value `$value`.
#[doc(hidden)]
#[macro_export]
macro_rules! bench_header(
    ($name:ident, $ty:ty, $value:expr) => {
        #[cfg(test)]
        #[cfg(feature = "nightly")]
        mod $name {
            use test::Bencher;
            use super::*;
            use header::{Header};

            #[bench]
            fn bench_parse(b: &mut Bencher) {
                let val = $value.into();
                b.iter(|| {
                    let _: $ty = Header::parse_header(&val).unwrap();
                });
            }

            #[bench]
            fn bench_format(b: &mut Bencher) {
                let raw = $value.into();
                let val: $ty = Header::parse_header(&raw).unwrap();
                b.iter(|| {
                    format!("{}", val);
                });
            }
        }
    }
);
// Implements `Deref`/`DerefMut` from the newtype `$from` to its inner type
// `$to`; assumes `$from` is a tuple struct with the target at `.0`.
#[doc(hidden)]
#[macro_export]
macro_rules! __hyper__deref {
    ($from:ty => $to:ty) => {
        impl ::std::ops::Deref for $from {
            type Target = $to;

            #[inline]
            fn deref(&self) -> &$to {
                &self.0
            }
        }

        impl ::std::ops::DerefMut for $from {
            #[inline]
            fn deref_mut(&mut self) -> &mut $to {
                &mut self.0
            }
        }
    }
}
// Wraps the test items `$tf` in a `#[cfg(test)]` module `$tm`, aliasing the
// header type `$id` as `HeaderField` so `test_header!` expansions can refer
// to it by a uniform name.
#[doc(hidden)]
#[macro_export]
macro_rules! __hyper__tm {
    ($id:ident, $tm:ident{$($tf:item)*}) => {
        #[allow(unused_imports)]
        #[cfg(test)]
        mod $tm{
            use std::str;
            use $crate::header::*;
            use $crate::mime::*;
            use $crate::method::Method;
            use super::$id as HeaderField;
            $($tf)*
        }
    }
}
// Generates a `#[test]` named `$id` exercising the surrounding module's
// `HeaderField` alias (established by `__hyper__tm!`).
#[doc(hidden)]
#[macro_export]
macro_rules! test_header {
    // Untyped round-trip: parse `$raw`, then compare the formatted output
    // against the first raw line, case-insensitively and ignoring spaces.
    ($id:ident, $raw:expr) => {
        #[test]
        fn $id() {
            #[allow(unused, deprecated)]
            use std::ascii::AsciiExt;
            let raw = $raw;
            let a: Vec<Vec<u8>> = raw.iter().map(|x| x.to_vec()).collect();
            let a = a.into();
            let value = HeaderField::parse_header(&a);
            let result = format!("{}", value.unwrap());
            let expected = String::from_utf8(raw[0].to_vec()).unwrap();
            let result_cmp: Vec<String> = result
                .to_ascii_lowercase()
                .split(' ')
                .map(|x| x.to_owned())
                .collect();
            let expected_cmp: Vec<String> = expected
                .to_ascii_lowercase()
                .split(' ')
                .map(|x| x.to_owned())
                .collect();
            assert_eq!(result_cmp.concat(), expected_cmp.concat());
        }
    };
    // Typed check: parsing `$raw` must yield exactly `$typed`; when `Some`,
    // formatting must reproduce the comma-joined raw lines.
    ($id:ident, $raw:expr, $typed:expr) => {
        #[test]
        fn $id() {
            let a: Vec<Vec<u8>> = $raw.iter().map(|x| x.to_vec()).collect();
            let a = a.into();
            let val = HeaderField::parse_header(&a);
            let typed: Option<HeaderField> = $typed;
            // Test parsing
            assert_eq!(val.ok(), typed);
            // Test formatting
            if typed.is_some() {
                let raw = &($raw)[..];
                let mut iter = raw.iter().map(|b|str::from_utf8(&b[..]).unwrap());
                let mut joined = String::new();
                joined.push_str(iter.next().unwrap());
                for s in iter {
                    joined.push_str(", ");
                    joined.push_str(s);
                }
                assert_eq!(format!("{}", typed.unwrap()), joined);
            }
        }
    }
}
// Declares a typed HTTP header (struct or enum) together with its
// `$crate::header::Header` and `Display` impls. The shape of the
// right-hand side of `=>` selects the representation; each shape also has
// a variant that accepts a trailing test module, expanded via
// `__hyper__tm!`.
#[macro_export]
macro_rules! header {
    // $a:meta: Attributes associated with the header item (usually docs)
    // $id:ident: Identifier of the header
    // $n:expr: Lowercase name of the header
    // $nn:expr: Nice name of the header

    // List header, zero or more items
    ($(#[$a:meta])*($id:ident, $n:expr) => ($item:ty)*) => {
        $(#[$a])*
        #[derive(Clone, Debug, PartialEq)]
        pub struct $id(pub Vec<$item>);
        __hyper__deref!($id => Vec<$item>);
        impl $crate::header::Header for $id {
            fn header_name() -> &'static str {
                static NAME: &'static str = $n;
                NAME
            }
            #[inline]
            fn parse_header(raw: &$crate::header::Raw) -> $crate::Result<Self> {
                $crate::header::parsing::from_comma_delimited(raw).map($id)
            }
            #[inline]
            fn fmt_header(&self, f: &mut $crate::header::Formatter) -> ::std::fmt::Result {
                f.fmt_line(self)
            }
        }
        impl ::std::fmt::Display for $id {
            #[inline]
            fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
                $crate::header::parsing::fmt_comma_delimited(f, &self.0[..])
            }
        }
    };
    // List header, one or more items
    ($(#[$a:meta])*($id:ident, $n:expr) => ($item:ty)+) => {
        $(#[$a])*
        #[derive(Clone, Debug, PartialEq)]
        pub struct $id(pub Vec<$item>);
        __hyper__deref!($id => Vec<$item>);
        impl $crate::header::Header for $id {
            #[inline]
            fn header_name() -> &'static str {
                static NAME: &'static str = $n;
                NAME
            }
            #[inline]
            fn parse_header(raw: &$crate::header::Raw) -> $crate::Result<Self> {
                $crate::header::parsing::from_comma_delimited(raw).map($id)
            }
            #[inline]
            fn fmt_header(&self, f: &mut $crate::header::Formatter) -> ::std::fmt::Result {
                f.fmt_line(self)
            }
        }
        impl ::std::fmt::Display for $id {
            #[inline]
            fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
                $crate::header::parsing::fmt_comma_delimited(f, &self.0[..])
            }
        }
    };
    // Single value header
    ($(#[$a:meta])*($id:ident, $n:expr) => [$value:ty]) => {
        $(#[$a])*
        #[derive(Clone, Debug, PartialEq)]
        pub struct $id(pub $value);
        __hyper__deref!($id => $value);
        impl $crate::header::Header for $id {
            #[inline]
            fn header_name() -> &'static str {
                static NAME: &'static str = $n;
                NAME
            }
            #[inline]
            fn parse_header(raw: &$crate::header::Raw) -> $crate::Result<Self> {
                $crate::header::parsing::from_one_raw_str(raw).map($id)
            }
            #[inline]
            fn fmt_header(&self, f: &mut $crate::header::Formatter) -> ::std::fmt::Result {
                f.fmt_line(self)
            }
        }
        impl ::std::fmt::Display for $id {
            #[inline]
            fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
                ::std::fmt::Display::fmt(&self.0, f)
            }
        }
    };
    // Single value cow header
    ($(#[$a:meta])*($id:ident, $n:expr) => Cow[$value:ty]) => {
        $(#[$a])*
        #[derive(Clone, Debug, PartialEq)]
        pub struct $id(::std::borrow::Cow<'static,$value>);
        impl $id {
            /// Creates a new $id
            pub fn new<I: Into<::std::borrow::Cow<'static,$value>>>(value: I) -> Self {
                $id(value.into())
            }
        }
        impl ::std::ops::Deref for $id {
            type Target = $value;
            #[inline]
            fn deref(&self) -> &Self::Target {
                &(self.0)
            }
        }
        impl $crate::header::Header for $id {
            #[inline]
            fn header_name() -> &'static str {
                static NAME: &'static str = $n;
                NAME
            }
            #[inline]
            fn parse_header(raw: &$crate::header::Raw) -> $crate::Result<Self> {
                $crate::header::parsing::from_one_raw_str::<<$value as ::std::borrow::ToOwned>::Owned>(raw).map($id::new)
            }
            #[inline]
            fn fmt_header(&self, f: &mut $crate::header::Formatter) -> ::std::fmt::Result {
                f.fmt_line(self)
            }
        }
        impl ::std::fmt::Display for $id {
            #[inline]
            fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
                ::std::fmt::Display::fmt(&self.0, f)
            }
        }
    };
    // List header, one or more items with "*" option
    ($(#[$a:meta])*($id:ident, $n:expr) => {Any / ($item:ty)+}) => {
        $(#[$a])*
        #[derive(Clone, Debug, PartialEq)]
        pub enum $id {
            /// Any value is a match
            Any,
            /// Only the listed items are a match
            Items(Vec<$item>),
        }
        impl $crate::header::Header for $id {
            #[inline]
            fn header_name() -> &'static str {
                static NAME: &'static str = $n;
                NAME
            }
            #[inline]
            fn parse_header(raw: &$crate::header::Raw) -> $crate::Result<Self> {
                // FIXME: Return None if no item is in $id::Only
                if raw.len() == 1 {
                    if &raw[0] == b"*" {
                        return Ok($id::Any)
                    }
                }
                $crate::header::parsing::from_comma_delimited(raw).map($id::Items)
            }
            #[inline]
            fn fmt_header(&self, f: &mut $crate::header::Formatter) -> ::std::fmt::Result {
                f.fmt_line(self)
            }
        }
        impl ::std::fmt::Display for $id {
            #[inline]
            fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
                match *self {
                    $id::Any => f.write_str("*"),
                    $id::Items(ref fields) => $crate::header::parsing::fmt_comma_delimited(
                        f, &fields[..])
                }
            }
        }
    };
    // optional test module
    ($(#[$a:meta])*($id:ident, $n:expr) => ($item:ty)* $tm:ident{$($tf:item)*}) => {
        header! {
            $(#[$a])*
            ($id, $n) => ($item)*
        }
        __hyper__tm! { $id, $tm { $($tf)* }}
    };
    ($(#[$a:meta])*($id:ident, $n:expr) => ($item:ty)+ $tm:ident{$($tf:item)*}) => {
        header! {
            $(#[$a])*
            ($id, $n) => ($item)+
        }
        __hyper__tm! { $id, $tm { $($tf)* }}
    };
    ($(#[$a:meta])*($id:ident, $n:expr) => [$item:ty] $tm:ident{$($tf:item)*}) => {
        header! {
            $(#[$a])*
            ($id, $n) => [$item]
        }
        __hyper__tm! { $id, $tm { $($tf)* }}
    };
    ($(#[$a:meta])*($id:ident, $n:expr) => Cow[$item:ty] $tm:ident{$($tf:item)*}) => {
        header! {
            $(#[$a])*
            ($id, $n) => Cow[$item]
        }
        __hyper__tm! { $id, $tm { $($tf)* }}
    };
    ($(#[$a:meta])*($id:ident, $n:expr) => {Any / ($item:ty)+} $tm:ident{$($tf:item)*}) => {
        header! {
            $(#[$a])*
            ($id, $n) => {Any / ($item)+}
        }
        __hyper__tm! { $id, $tm { $($tf)* }}
    };
}
| 32.25731 | 121 | 0.418782 |
dd0a02c1ffa3ba1b01c703b0840562b133c2d5e8
| 1,017 |
use std::{error::Error, str::FromStr};
use advent_utils::{parse_raw_data, Part, Solver};
mod command;
mod memory;
use command::Command;
use memory::Memory;
/// Day 14 solution state: the parsed list of mask/memory-write commands.
#[derive(Debug)]
pub struct Solution {
    commands: Vec<Command>,
}
impl FromStr for Solution {
    type Err = Box<dyn Error>;

    /// Parses the puzzle input into a list of commands, one per line.
    fn from_str(input_data: &str) -> Result<Self, Self::Err> {
        let commands = parse_raw_data(input_data)?;
        Ok(Self { commands })
    }
}
impl Solver for Solution {
    fn day_number() -> u32 {
        14
    }

    fn solve(&self, part: Part) -> String {
        let mut mem = Memory::new();
        match part {
            Part::One => {
                // Replay every command, then sum the values left in memory.
                self.commands.iter().for_each(|c| mem.process_command(c));
                format!(
                    "sum of values in memory: {}",
                    mem.data().values().sum::<u64>()
                )
            }
            // Part two is not implemented yet; `implemented_parts` reflects
            // this so callers should never reach here with `Part::Two`.
            Part::Two => unimplemented!(),
        }
    }

    fn implemented_parts() -> Vec<Part> {
        vec![Part::One]
    }
}
| 19.941176 | 74 | 0.512291 |
7aa6af1d677c310b444c458d639a58b2feae0b8e
| 1,539 |
// Copyright (c) SimpleStaking and Tezedge Contributors
// SPDX-License-Identifier: MIT
fn main() {
    // The eBPF bytecode is only needed when building the user-space side.
    #[cfg(feature = "user")]
    build_bpf()
}
// Cross-compiles the kernel-side BPF recorder with a pinned nightly
// toolchain and the bpf-linker pipeline, patches the emitted object, and
// exports its path to the main build via the BPF_CODE_RECORDER env var.
#[cfg(feature = "user")]
fn build_bpf() {
    use std::{env, process::Command};

    // Keep BPF artifacts under `<target>/bpf` so they do not collide with
    // the host-architecture build.
    let target_dir = env::var("CARGO_TARGET_DIR").unwrap_or_else(|_| "../target".to_string());
    let target_dir = format!("{}/bpf", target_dir);
    let args = &[
        "+nightly-2020-12-31",
        "rustc",
        "--package=bpf-recorder",
        "--bin=bpf-recorder-kern",
        "--features=kern",
        "--no-default-features",
        "--",
        "-Clinker-plugin-lto",
        "-Clinker-flavor=wasm-ld",
        "-Clinker=bpf-linker",
        "-Clink-arg=--target=bpf",
        "-Clink-arg=-O3",
    ];
    // RUSTFLAGS is cleared so host-build flags do not leak into the BPF
    // compilation.
    let output = Command::new("cargo")
        .env("RUSTFLAGS", "")
        .env("CARGO_TARGET_DIR", &target_dir)
        .args(args)
        .output()
        .expect("failed to build bpf code");
    if !output.status.success() {
        let error = String::from_utf8(output.stderr).expect("malformed error message");
        panic!("{}", error);
    }
    // Rewrite `ty__` back to `type` in the emitted object — presumably
    // because the Rust source cannot use the keyword directly; confirm
    // against bpf-recorder's section names.
    Command::new("sed")
        .current_dir(&target_dir)
        .arg("-i")
        .arg("s/ty__/type/g")
        .arg("debug/bpf-recorder-kern")
        .output()
        .expect("failed to patch bpf object");
    println!(
        "cargo:rustc-env=BPF_CODE_RECORDER={}/debug/bpf-recorder-kern",
        target_dir
    );
    println!("cargo:rerun-if-changed=src/main.rs");
    println!("cargo:rerun-if-changed=src/event.rs");
}
| 27.981818 | 94 | 0.556855 |
8ff0f5d08e962f8eae0adb1db0f593abeafa0fdc
| 4,128 |
//! # BER/DER Parser
//!
//! A parser for Basic Encoding Rules (BER [[X.690]]) and Distinguished Encoding Rules(DER
//! [[X.690]]), implemented with the [nom](https://github.com/Geal/nom) parser combinator
//! framework.
//!
//! The code is available on [Github](https://github.com/rusticata/der-parser)
//! and is part of the [Rusticata](https://github.com/rusticata) project.
//!
//! # DER parser design
//!
//! There are two different approaches for parsing DER objects: reading the objects recursively as
//! long as the tags are known, or specifying a description of the expected objects (generally from
//! the [ASN.1][X.680] description).
//!
//! The first parsing method can be done using the [`parse_ber`](ber/fn.parse_ber.html) and
//! [`parse_der`](der/fn.parse_der.html) methods.
//! However, it cannot fully parse all objects, especially those containing IMPLICIT, OPTIONAL, or
//! DEFINED BY items.
//!
//! ```rust
//! # #[macro_use] extern crate der_parser;
//! use der_parser::parse_der;
//!
//! # fn main() {
//! let bytes = [ 0x30, 0x0a,
//! 0x02, 0x03, 0x01, 0x00, 0x01,
//! 0x02, 0x03, 0x01, 0x00, 0x00,
//! ];
//!
//! let parsed = parse_der(&bytes);
//! # }
//! ```
//!
//! The second (and preferred) parsing method is to specify the expected objects recursively. The
//! following macros can be used:
//! [`parse_der_sequence_defined`](macro.parse_der_sequence_defined.html) and similar functions,
//! [`parse_der_struct`](macro.parse_der_struct.html), etc.
//!
//! For example, to read a sequence containing two integers:
//!
//! ```rust
//! # #[macro_use] extern crate nom;
//! # #[macro_use] extern crate rusticata_macros;
//! # #[macro_use] extern crate der_parser;
//! use der_parser::ber::*;
//! use der_parser::error::BerError;
//! use nom::{IResult,Err};
//!
//! # fn main() {
//! fn localparse_seq(i:&[u8]) -> IResult<&[u8], BerObject, BerError> {
//! parse_der_sequence_defined!(i,
//! parse_ber_integer >>
//! parse_ber_integer
//! )
//! }
//! let bytes = [ 0x30, 0x0a,
//! 0x02, 0x03, 0x01, 0x00, 0x01,
//! 0x02, 0x03, 0x01, 0x00, 0x00,
//! ];
//! let parsed = localparse_seq(&bytes);
//! # }
//! ```
//!
//! All functions return an `IResult` object from `nom`: the parsed
//! [`BerObject`](ber/struct.BerObject.html), an `Incomplete` value, or an error.
//!
//! # Notes
//!
//! - The DER constraints are verified if using `parse_der`.
//! - `BerObject` and `DerObject` are the same objects (type alias). The only difference is the
//! verification of constraints *during parsing*.
//! - DER integers can be of any size, so it is not possible to store them as simple integers (they
//! are stored as raw bytes). To get a simple value, use
//! [`BerObject::as_u32`](ber/struct.BerObject.html#method.as_u32) (knowning that this method will
//! return an error if the integer is too large), [`BerObject::as_u64`](ber/struct.BerObject.html#method.as_u64),
//! or use the `bigint` feature of this crate and use
//! [`BerObject::as_bigint`](ber/struct.BerObject.html#method.as_bigint).
//!
//! # References
//!
//! - [[X.680]] Abstract Syntax Notation One (ASN.1): Specification of basic notation.
//! - [[X.690]] ASN.1 encoding rules: Specification of Basic Encoding Rules (BER), Canonical
//! Encoding Rules (CER) and Distinguished Encoding Rules (DER).
//!
//! [X.680]: http://www.itu.int/rec/T-REC-X.680/en "Abstract Syntax Notation One (ASN.1):
//! Specification of basic notation."
//! [X.690]: https://www.itu.int/rec/T-REC-X.690/en "ASN.1 encoding rules: Specification of
//! Basic Encoding Rules (BER), Canonical Encoding Rules (CER) and Distinguished Encoding Rules
//! (DER)."
#![deny(/*missing_docs,*/unsafe_code,
unstable_features,
unused_import_braces, unused_qualifications)]
#[macro_use]
extern crate nom;
#[macro_use]
extern crate rusticata_macros;
#[macro_use]
mod macros;
pub mod ber;
pub mod der;
pub mod error;
pub mod oid;
// compatibility: re-export at crate root
pub use ber::parse_ber;
pub use der::parse_der;
#[cfg(feature = "bigint")]
extern crate num_bigint;
| 35.586207 | 113 | 0.666909 |
e9a588a1ca49b0f2db51c93d54f966dc814017c8
| 17,933 |
//! Event handling types.
use crate::system::{Local, Res, ResMut, SystemParam};
use crate::{self as bevy_ecs, system::Resource};
use bevy_utils::tracing::trace;
use std::{
fmt::{self},
hash::Hash,
marker::PhantomData,
};
/// An `EventId` uniquely identifies an event.
///
/// An `EventId` can among other things be used to trace the flow of an event from the point it was
/// sent to the point it was processed.
#[derive(Eq, PartialEq, Ord, PartialOrd, Hash)]
pub struct EventId<T> {
    // The unique index of this event within its `Events<T>` collection.
    pub id: usize,
    // Ties the id to the event type without storing a `T`.
    _marker: PhantomData<T>,
}
// Manual impls instead of derives: only the `usize` id (plus PhantomData)
// is stored, so `EventId<T>` is copyable even when `T` itself is not.
impl<T> Copy for EventId<T> {}
impl<T> Clone for EventId<T> {
    fn clone(&self) -> Self {
        *self
    }
}
impl<T> fmt::Display for EventId<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // The Debug output ("event<Type>#id") is already human-readable,
        // so Display simply delegates to it.
        <Self as fmt::Debug>::fmt(self, f)
    }
}
impl<T> fmt::Debug for EventId<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "event<{}>#{}",
            // Keep only the last path segment of the type name, e.g.
            // `my_crate::MyEvent` renders as `event<MyEvent>#7`.
            std::any::type_name::<T>().split("::").last().unwrap(),
            self.id,
        )
    }
}
// Internal pairing of a stored event with the id assigned when it was sent.
#[derive(Debug)]
struct EventInstance<T> {
    pub event_id: EventId<T>,
    pub event: T,
}
// Which of the two internal buffers (`events_a` / `events_b`) is currently
// active; per the `Events` docs, the buffers are swapped on each
// `Events::update` call.
#[derive(Debug)]
enum State {
    A,
    B,
}
/// An event collection that represents the events that occurred within the last two
/// [`Events::update`] calls.
/// Events can be written to using an [`EventWriter`]
/// and are typically cheaply read using an [`EventReader`].
///
/// Each event can be consumed by multiple systems, in parallel,
/// with consumption tracked by the [`EventReader`] on a per-system basis.
///
/// This collection is meant to be paired with a system that calls
/// [`Events::update`] exactly once per update/frame.
///
//! [`Events::update_system`] is a system that does this, typically initialized automatically using
/// [`App::add_event`]. [`EventReader`]s are expected to read events from this collection at
/// least once per loop/frame.
/// Events will persist across a single frame boundary and so ordering of event producers and
/// consumers is not critical (although poorly-planned ordering may cause accumulating lag).
/// If events are not handled by the end of the frame after they are updated, they will be
/// dropped silently.
///
/// # Example
/// ```
/// use bevy_ecs::event::Events;
///
/// struct MyEvent {
/// value: usize
/// }
///
/// // setup
/// let mut events = Events::<MyEvent>::default();
/// let mut reader = events.get_reader();
///
/// // run this once per update/frame
/// events.update();
///
/// // somewhere else: send an event
/// events.send(MyEvent { value: 1 });
///
/// // somewhere else: read the events
/// for event in reader.iter(&events) {
/// assert_eq!(event.value, 1)
/// }
///
/// // events are only processed once per reader
/// assert_eq!(reader.iter(&events).count(), 0);
/// ```
///
/// # Details
///
/// [`Events`] is implemented using a double buffer. Each call to [`Events::update`] swaps buffers
/// and clears out the oldest buffer. [`EventReader`]s that read at least once per update will never
/// drop events. [`EventReader`]s that read once within two updates might still receive some events.
/// [`EventReader`]s that read after two updates are guaranteed to drop all events that occurred
/// before those updates.
///
/// The buffers in [`Events`] will grow indefinitely if [`Events::update`] is never called.
///
/// An alternative call pattern would be to call [`Events::update`] manually across frames to
/// control when events are cleared.
/// This complicates consumption and risks ever-expanding memory usage if not cleaned up,
/// but can be done by adding your event as a resource instead of using [`App::add_event`].
///
/// [`App::add_event`]: https://docs.rs/bevy/*/bevy/app/struct.App.html#method.add_event
#[derive(Debug)]
pub struct Events<T> {
    // Double-buffered storage; `state` selects which buffer receives new events.
    events_a: Vec<EventInstance<T>>,
    events_b: Vec<EventInstance<T>>,
    // Value of `event_count` at the moment the corresponding buffer last
    // became the write target; used to translate reader cursors into indices.
    a_start_event_count: usize,
    b_start_event_count: usize,
    // Running total of events ever sent; also mints `EventId`s.
    event_count: usize,
    // Which buffer is currently written to.
    state: State,
}
impl<T> Default for Events<T> {
    /// An empty event collection with both buffers fresh and the write state
    /// pointing at buffer A.
    fn default() -> Self {
        Self {
            events_a: Vec::new(),
            events_b: Vec::new(),
            a_start_event_count: 0,
            b_start_event_count: 0,
            event_count: 0,
            state: State::A,
        }
    }
}
/// Projects an `EventInstance` onto a `(&event, id)` pair (the id is `Copy`).
fn map_instance_event_with_id<T>(event_instance: &EventInstance<T>) -> (&T, EventId<T>) {
    (&event_instance.event, event_instance.event_id)
}
/// Projects an `EventInstance` onto the event payload only.
fn map_instance_event<T>(event_instance: &EventInstance<T>) -> &T {
    &event_instance.event
}
/// Reads events of type `T` in order and tracks which events have already been read.
#[derive(SystemParam)]
pub struct EventReader<'w, 's, T: Resource> {
    // Per-system cursor: the number of events this reader has already seen.
    last_event_count: Local<'s, (usize, PhantomData<T>)>,
    events: Res<'w, Events<T>>,
}
/// Sends events of type `T`.
#[derive(SystemParam)]
pub struct EventWriter<'w, 's, T: Resource> {
    events: ResMut<'w, Events<T>>,
    // NOTE(review): presumably present so the `'s` lifetime is used, as the
    // `SystemParam` derive requires — confirm against the derive macro.
    #[system_param(ignore)]
    marker: PhantomData<&'s usize>,
}
impl<'w, 's, T: Resource> EventWriter<'w, 's, T> {
    /// Sends `event`, making it visible to [`EventReader`]s of the same type.
    pub fn send(&mut self, event: T) {
        self.events.send(event);
    }
    /// Sends every event yielded by the iterator, in order.
    pub fn send_batch(&mut self, events: impl Iterator<Item = T>) {
        self.events.extend(events);
    }
}
/// Like [`EventReader`], but the read cursor is stored inline instead of in a
/// system `Local`, so it can be used outside of a system parameter context.
pub struct ManualEventReader<T> {
    // Number of events this reader has already seen.
    last_event_count: usize,
    _marker: PhantomData<T>,
}
impl<T> Default for ManualEventReader<T> {
fn default() -> Self {
ManualEventReader {
last_event_count: 0,
_marker: Default::default(),
}
}
}
impl<T> ManualEventReader<T> {
    /// See [`EventReader::iter`]
    pub fn iter<'a>(&mut self, events: &'a Events<T>) -> impl DoubleEndedIterator<Item = &'a T> {
        // Delegate to the id-carrying variant and drop the ids.
        self.iter_with_id(events).map(|(event, _id)| event)
    }
    /// See [`EventReader::iter_with_id`]
    pub fn iter_with_id<'a>(
        &mut self,
        events: &'a Events<T>,
    ) -> impl DoubleEndedIterator<Item = (&'a T, EventId<T>)> {
        internal_event_reader(&mut self.last_event_count, events)
    }
}
/// Like [`iter_with_id`](EventReader::iter_with_id) except not emitting any traces for read
/// messages.
///
/// Yields every event `last_event_count` has not yet covered, oldest buffer
/// first, and advances the cursor to the current total so subsequent calls
/// skip the events returned here.
fn internal_event_reader<'a, T>(
    last_event_count: &mut usize,
    events: &'a Events<T>,
) -> impl DoubleEndedIterator<Item = (&'a T, EventId<T>)> {
    // If the reader has seen some of the events in a buffer, find the proper
    // index offset; otherwise read the whole buffer. `saturating_sub` encodes
    // the "cursor predates this buffer" case (offset 0) directly.
    let a_index = last_event_count.saturating_sub(events.a_start_event_count);
    let b_index = last_event_count.saturating_sub(events.b_start_event_count);
    *last_event_count = events.event_count;
    // The non-current buffer holds the older events, so it is chained first to
    // preserve send order. `get(i..)` is `None` when the cursor is already past
    // the end of a buffer; that contributes an empty slice.
    match events.state {
        State::A => events
            .events_b
            .get(b_index..)
            .unwrap_or(&[])
            .iter()
            .map(map_instance_event_with_id)
            .chain(
                events
                    .events_a
                    .get(a_index..)
                    .unwrap_or(&[])
                    .iter()
                    .map(map_instance_event_with_id),
            ),
        State::B => events
            .events_a
            .get(a_index..)
            .unwrap_or(&[])
            .iter()
            .map(map_instance_event_with_id)
            .chain(
                events
                    .events_b
                    .get(b_index..)
                    .unwrap_or(&[])
                    .iter()
                    .map(map_instance_event_with_id),
            ),
    }
}
impl<'w, 's, T: Resource> EventReader<'w, 's, T> {
    /// Iterates over the events this [`EventReader`] has not seen yet. This updates the
    /// [`EventReader`]'s event counter, which means subsequent event reads will not include events
    /// that happened before now.
    pub fn iter(&mut self) -> impl DoubleEndedIterator<Item = &T> {
        self.iter_with_id().map(|(event, _id)| event)
    }
    /// Like [`iter`](Self::iter), except also returning the [`EventId`] of the events.
    pub fn iter_with_id(&mut self) -> impl DoubleEndedIterator<Item = (&T, EventId<T>)> {
        internal_event_reader(&mut self.last_event_count.0, &self.events).map(|(event, id)| {
            // NOTE(review): the trace message says `iter()`, but this path also
            // serves `iter_with_id()`; `iter()` delegates here.
            trace!("EventReader::iter() -> {}", id);
            (event, id)
        })
    }
}
impl<T: Resource> Events<T> {
    /// "Sends" an `event` by writing it to the current event buffer. [`EventReader`]s can then read
    /// the event.
    pub fn send(&mut self, event: T) {
        // The id is simply the running total of events sent so far.
        let event_id = EventId {
            id: self.event_count,
            _marker: PhantomData,
        };
        trace!("Events::send() -> id: {}", event_id);
        let event_instance = EventInstance { event_id, event };
        // `state` selects which of the double buffers is currently written to.
        match self.state {
            State::A => self.events_a.push(event_instance),
            State::B => self.events_b.push(event_instance),
        }
        self.event_count += 1;
    }
    /// Gets a new [`ManualEventReader`]. This will include all events already in the event buffers.
    pub fn get_reader(&self) -> ManualEventReader<T> {
        ManualEventReader {
            last_event_count: 0,
            _marker: PhantomData,
        }
    }
    /// Gets a new [`ManualEventReader`]. This will ignore all events already in the event buffers.
    /// It will read all future events.
    pub fn get_reader_current(&self) -> ManualEventReader<T> {
        ManualEventReader {
            // Starting the cursor at the current total skips everything sent so far.
            last_event_count: self.event_count,
            _marker: PhantomData,
        }
    }
    /// Swaps the event buffers and clears the oldest event buffer. In general, this should be
    /// called once per frame/update.
    pub fn update(&mut self) {
        // The buffer being cleared becomes the new write target, so an event
        // survives exactly one additional `update` after the one following its
        // send before being dropped.
        match self.state {
            State::A => {
                self.events_b.clear();
                self.state = State::B;
                self.b_start_event_count = self.event_count;
            }
            State::B => {
                self.events_a.clear();
                self.state = State::A;
                self.a_start_event_count = self.event_count;
            }
        }
    }
    /// A system that calls [`Events::update`] once per frame.
    pub fn update_system(mut events: ResMut<Self>) {
        events.update();
    }
    // Marks both buffers as starting "now" so readers treat the current
    // contents as already consumed; used by `clear` and `drain`.
    #[inline]
    fn reset_start_event_count(&mut self) {
        self.a_start_event_count = self.event_count;
        self.b_start_event_count = self.event_count;
    }
    /// Removes all events.
    #[inline]
    pub fn clear(&mut self) {
        self.reset_start_event_count();
        self.events_a.clear();
        self.events_b.clear();
    }
    /// Returns true if there are no events in this collection.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.events_a.is_empty() && self.events_b.is_empty()
    }
    /// Creates a draining iterator that removes all events.
    pub fn drain(&mut self) -> impl Iterator<Item = T> + '_ {
        self.reset_start_event_count();
        // Drain the older (non-current) buffer first to preserve send order.
        let map = |i: EventInstance<T>| i.event;
        match self.state {
            State::A => self
                .events_b
                .drain(..)
                .map(map)
                .chain(self.events_a.drain(..).map(map)),
            State::B => self
                .events_a
                .drain(..)
                .map(map)
                .chain(self.events_b.drain(..).map(map)),
        }
    }
    /// Iterates over events that happened since the last "update" call.
    /// WARNING: You probably don't want to use this call. In most cases you should use an
    /// `EventReader`. You should only use this if you know you only need to consume events
    /// between the last `update()` call and your call to `iter_current_update_events`.
    /// If events happen outside that window, they will not be handled. For example, any events that
    /// happen after this call and before the next `update()` call will be dropped.
    pub fn iter_current_update_events(&self) -> impl DoubleEndedIterator<Item = &T> {
        match self.state {
            State::A => self.events_a.iter().map(map_instance_event),
            State::B => self.events_b.iter().map(map_instance_event),
        }
    }
}
impl<T> std::iter::Extend<T> for Events<T> {
    /// Sends every item of `iter` as an event, assigning consecutive ids.
    fn extend<I>(&mut self, iter: I)
    where
        I: IntoIterator<Item = T>,
    {
        // A local counter is advanced while the lazy iterator is consumed by
        // the buffer's `extend`; `self.event_count` is published once at the
        // end (the closure could not borrow `self` mutably alongside the buffer).
        let mut event_count = self.event_count;
        let events = iter.into_iter().map(|event| {
            let event_id = EventId {
                id: event_count,
                _marker: PhantomData,
            };
            event_count += 1;
            EventInstance { event_id, event }
        });
        match self.state {
            State::A => self.events_a.extend(events),
            State::B => self.events_b.extend(events),
        }
        trace!(
            "Events::extend() -> ids: ({}..{})",
            self.event_count,
            event_count
        );
        self.event_count = event_count;
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[derive(Copy, Clone, PartialEq, Eq, Debug)]
    struct TestEvent {
        i: usize,
    }
    // End-to-end check of the double-buffer semantics: readers created at
    // different times, reads across `update()` boundaries, and event drop
    // after two updates.
    #[test]
    fn test_events() {
        let mut events = Events::<TestEvent>::default();
        let event_0 = TestEvent { i: 0 };
        let event_1 = TestEvent { i: 1 };
        let event_2 = TestEvent { i: 2 };
        // this reader will miss event_0 and event_1 because it won't read them over the course of
        // two updates
        let mut reader_missed = events.get_reader();
        let mut reader_a = events.get_reader();
        events.send(event_0);
        assert_eq!(
            get_events(&events, &mut reader_a),
            vec![event_0],
            "reader_a created before event receives event"
        );
        assert_eq!(
            get_events(&events, &mut reader_a),
            vec![],
            "second iteration of reader_a created before event results in zero events"
        );
        let mut reader_b = events.get_reader();
        assert_eq!(
            get_events(&events, &mut reader_b),
            vec![event_0],
            "reader_b created after event receives event"
        );
        assert_eq!(
            get_events(&events, &mut reader_b),
            vec![],
            "second iteration of reader_b created after event results in zero events"
        );
        events.send(event_1);
        let mut reader_c = events.get_reader();
        assert_eq!(
            get_events(&events, &mut reader_c),
            vec![event_0, event_1],
            "reader_c created after two events receives both events"
        );
        assert_eq!(
            get_events(&events, &mut reader_c),
            vec![],
            "second iteration of reader_c created after two event results in zero events"
        );
        assert_eq!(
            get_events(&events, &mut reader_a),
            vec![event_1],
            "reader_a receives next unread event"
        );
        events.update();
        let mut reader_d = events.get_reader();
        events.send(event_2);
        assert_eq!(
            get_events(&events, &mut reader_a),
            vec![event_2],
            "reader_a receives event created after update"
        );
        assert_eq!(
            get_events(&events, &mut reader_b),
            vec![event_1, event_2],
            "reader_b receives events created before and after update"
        );
        assert_eq!(
            get_events(&events, &mut reader_d),
            vec![event_0, event_1, event_2],
            "reader_d receives all events created before and after update"
        );
        events.update();
        assert_eq!(
            get_events(&events, &mut reader_missed),
            vec![event_2],
            "reader_missed missed events unread after two update() calls"
        );
    }
    // Helper: drains all unread events for `reader` into a Vec.
    fn get_events(
        events: &Events<TestEvent>,
        reader: &mut ManualEventReader<TestEvent>,
    ) -> Vec<TestEvent> {
        reader.iter(events).cloned().collect::<Vec<TestEvent>>()
    }
    #[derive(PartialEq, Eq, Debug)]
    struct E(usize);
    // Shared body for the clear/drain tests: after `clear_func`, previously
    // unread events must be gone, while later events are still delivered.
    fn events_clear_and_read_impl(clear_func: impl FnOnce(&mut Events<E>)) {
        let mut events = Events::<E>::default();
        let mut reader = events.get_reader();
        assert!(reader.iter(&events).next().is_none());
        events.send(E(0));
        assert_eq!(*reader.iter(&events).next().unwrap(), E(0));
        assert_eq!(reader.iter(&events).next(), None);
        events.send(E(1));
        clear_func(&mut events);
        assert!(reader.iter(&events).next().is_none());
        events.send(E(2));
        events.update();
        events.send(E(3));
        assert!(reader.iter(&events).eq([E(2), E(3)].iter()));
    }
    #[test]
    fn test_events_clear_and_read() {
        events_clear_and_read_impl(|events| events.clear());
    }
    #[test]
    fn test_events_drain_and_read() {
        events_clear_and_read_impl(|events| {
            assert!(events.drain().eq(vec![E(0), E(1)].into_iter()));
        });
    }
    #[test]
    fn test_events_extend_impl() {
        let mut events = Events::<TestEvent>::default();
        let mut reader = events.get_reader();
        events.extend(vec![TestEvent { i: 0 }, TestEvent { i: 1 }]);
        assert!(reader
            .iter(&events)
            .eq([TestEvent { i: 0 }, TestEvent { i: 1 }].iter()));
    }
    #[test]
    fn test_events_empty() {
        let mut events = Events::<TestEvent>::default();
        assert!(events.is_empty());
        events.send(TestEvent { i: 0 });
        assert!(!events.is_empty());
        events.update();
        assert!(!events.is_empty());
        // events are only empty after the second call to update
        // due to double buffering.
        events.update();
        assert!(events.is_empty());
    }
}
| 30.654701 | 100 | 0.57949 |
164809350b41459cee4ac52e8f871d2ec5496ef3
| 2,162 |
use std::prelude::v1::*;
use bitvec::prelude::*;
use serde_test::{
Token,
assert_ser_tokens,
};
//#[cfg(feature = "alloc")]
use serde_test::assert_de_tokens;
// Builds the `serde_test` token stream a serialized `BitSet` produces:
// a struct with fields `head: u8`, `bits: u64`, and `data: [$ty; $elts]`.
// The `s` arm emits `Token::Str` keys (used with `assert_ser_tokens`);
// the `d` arm emits `Token::BorrowedStr` keys (used with `assert_de_tokens`).
macro_rules! bvtok {
    ( s $elts:expr, $head:expr, $bits:expr, $ty:ident $( , $data:expr )* ) => {
        &[
            Token::Struct { name: "BitSet", len: 3, },
            Token::Str("head"), Token::U8( $head ),
            Token::Str("bits"), Token::U64( $bits ),
            Token::Str("data"), Token::Seq { len: Some( $elts ) },
            $( Token:: $ty ( $data ), )*
            Token::SeqEnd,
            Token::StructEnd,
        ]
    };
    ( d $elts:expr, $head:expr, $bits:expr, $ty:ident $( , $data:expr )* ) => {
        &[
            Token::Struct { name: "BitSet", len: 3, },
            Token::BorrowedStr("head"), Token::U8( $head ),
            Token::BorrowedStr("bits"), Token::U64( $bits ),
            Token::BorrowedStr("data"), Token::Seq { len: Some( $elts ) },
            $( Token:: $ty ( $data ), )*
            Token::SeqEnd,
            Token::StructEnd,
        ]
    };
}
//#[test]
// NOTE(review): the `#[test]` / `#[cfg]` attributes are commented out and the
// functions are `pub` — presumably invoked by an external test harness; confirm.
// Serializing and deserializing an empty bit slice round-trips zero elements.
pub fn empty() {
    let slice = BitSlice::<BigEndian, u8>::empty();
    assert_ser_tokens(&slice, bvtok![s 0, 0, 0, U8]);
    //#[cfg(feature = "alloc")]
    assert_de_tokens(&bitvec![], bvtok![ d 0, 0, 0, U8 ]);
}
//#[cfg(feature = "alloc")]
//#[test]
// Serialization of short bit sequences across element types u8/u16/u32,
// including a slice with a non-zero head offset.
pub fn small() {
    let bv = bitvec![1; 5];
    let bs = &bv[1 ..];
    assert_ser_tokens(&bs, bvtok![s 1, 1, 4, U8, 0b1111_1000]);
    let bv = bitvec![LittleEndian, u16; 1; 12];
    assert_ser_tokens(&bv, bvtok![s 1, 0, 12, U16, 0b00001111_11111111]);
    let bb: BitBox<_, _> = bitvec![LittleEndian, u32; 1; 10].into();
    assert_ser_tokens(&bb, bvtok![s 1, 0, 10, U32, 0x00_00_03_FF]);
}
//#[cfg(feature = "alloc")]
//#[test]
// A slice spanning two storage elements serializes both elements.
pub fn wide() {
    let src: &[u8] = &[0, !0];
    let bs: &BitSlice = src.into();
    assert_ser_tokens(&(&bs[1 .. 15]), bvtok![s 2, 1, 14, U8, 0, !0]);
}
//#[cfg(feature = "alloc")]
//#[test]
//#[cfg(feature = "alloc")]
// Deserialization ignores storage bits past the logical length.
pub fn deser() {
    let bv = bitvec![0, 1, 1, 0, 1, 0];
    assert_de_tokens(&bv, bvtok![d 1, 0, 6, U8, 0b0110_1000]);
    // test that the bits outside the bits domain don't matter in deser
    assert_de_tokens(&bv, bvtok![d 1, 0, 6, U8, 0b0110_1001]);
    assert_de_tokens(&bv, bvtok![d 1, 0, 6, U8, 0b0110_1010]);
    assert_de_tokens(&bv, bvtok![d 1, 0, 6, U8, 0b0110_1011]);
}
| 27.717949 | 76 | 0.582331 |
eda3e2cac713b617bb9d3a8237cbf421d0e3cf48
| 31,729 |
use super::errors::*;
use super::{DmlBase, Handler, HandlerTypes, Object, PodType};
use crate::allocator::{Action, SegmentAllocator, SegmentId};
use crate::cache::{AddSize, Cache, ChangeKeyError, RemoveError};
use crate::checksum::{Builder, Checksum, State};
use crate::compression::{Compress, Compression};
use crate::size::{SizeMut, StaticSize};
use crate::storage_pool::{DiskOffset, StoragePoolLayer};
use crate::vdev::{Block, BLOCK_SIZE};
use futures::executor::block_on;
use futures::future::ok;
use futures::prelude::*;
use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard};
use serde::de::DeserializeOwned;
use serde::ser::Error as SerError;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use stable_deref_trait::StableDeref;
use std::collections::HashMap;
use std::mem::{replace, transmute, ManuallyDrop};
use std::ops::{Deref, DerefMut};
use std::pin::Pin;
use std::sync::atomic::{AtomicU64, Ordering};
use std::thread::yield_now;
/// Id minted for an object that exists modified in the cache and therefore has
/// no (current) on-disk location yet.
#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)]
pub struct ModifiedObjectId(u64);
/// Cache key for an object, tracking where it stands in its modification
/// lifecycle.
#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)]
pub enum ObjectKey<G> {
    /// Clean object, keyed by its on-disk location and generation.
    Unmodified { offset: DiskOffset, generation: G },
    /// Dirty object that has not been written back yet.
    Modified(ModifiedObjectId),
    /// Dirty object currently being written back to disk.
    InWriteback(ModifiedObjectId),
}
/// Reference to an object: either the pointer to its on-disk representation or
/// the id under which the (modified) object lives in the cache.
pub enum ObjectRef<P> {
    Unmodified(P),
    Modified(ModifiedObjectId),
    InWriteback(ModifiedObjectId),
}
impl<C, D, I, G> super::ObjectRef for ObjectRef<ObjectPointer<C, D, I, G>>
where
    C: 'static,
    D: 'static,
    I: 'static,
    G: Copy + 'static,
    ObjectPointer<C, D, I, G>: Serialize + DeserializeOwned + StaticSize,
{
    type ObjectPointer = ObjectPointer<C, D, I, G>;
    /// Returns the on-disk pointer if this reference is clean, `None` otherwise.
    fn get_unmodified(&self) -> Option<&ObjectPointer<C, D, I, G>> {
        match *self {
            ObjectRef::Unmodified(ref ptr) => Some(ptr),
            ObjectRef::Modified(_) | ObjectRef::InWriteback(_) => None,
        }
    }
}
impl<C, D, I, G: Copy> ObjectRef<ObjectPointer<C, D, I, G>> {
    /// Derives the cache key under which the referenced object is (or would
    /// be) stored; the three variants map one-to-one onto `ObjectKey`.
    fn as_key(&self) -> ObjectKey<G> {
        match *self {
            ObjectRef::Unmodified(ref ptr) => ObjectKey::Unmodified {
                offset: ptr.offset,
                generation: ptr.generation,
            },
            ObjectRef::Modified(mid) => ObjectKey::Modified(mid),
            ObjectRef::InWriteback(mid) => ObjectKey::InWriteback(mid),
        }
    }
}
impl<P: StaticSize> StaticSize for ObjectRef<P> {
    fn size() -> usize {
        // Only `Unmodified` refs are ever serialized (see the `Serialize` impl),
        // so the static size equals the pointer's size.
        P::size()
    }
}
impl<P: Serialize> Serialize for ObjectRef<P> {
    /// Serializes the wrapped object pointer.
    ///
    /// Only `Unmodified` references are serializable; dirty references have no
    /// stable on-disk representation and yield a custom serde error.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match *self {
            ObjectRef::Modified(_) => Err(S::Error::custom(
                "ObjectRef: Tried to serialize a modified ObjectRef",
            )),
            ObjectRef::InWriteback(_) => Err(S::Error::custom(
                "ObjectRef: Tried to serialize a modified ObjectRef which is currently written back",
            )),
            ObjectRef::Unmodified(ref ptr) => ptr.serialize(serializer),
        }
    }
}
impl<'de, C, D, I, G: Copy> Deserialize<'de> for ObjectRef<ObjectPointer<C, D, I, G>>
where
    ObjectPointer<C, D, I, G>: Deserialize<'de>,
{
    /// Deserializes an object pointer; the result is always a clean
    /// (`Unmodified`) reference.
    fn deserialize<E>(deserializer: E) -> Result<Self, E::Error>
    where
        E: Deserializer<'de>,
    {
        let ptr = ObjectPointer::<C, D, I, G>::deserialize(deserializer)?;
        Ok(ObjectRef::Unmodified(ptr))
    }
}
/// Pointer to an object's on-disk representation.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ObjectPointer<C, D, I, G> {
    /// Compression applied to the stored data.
    compression: C,
    /// Checksum over the stored (compressed) bytes.
    checksum: D,
    /// On-disk location.
    offset: DiskOffset,
    /// Size of the stored data in blocks.
    size: Block<u32>,
    /// Handler-defined metadata carried alongside the object.
    info: I,
    /// Generation in which the object was written.
    generation: G,
}
impl<C: StaticSize, D: StaticSize, I: StaticSize, G: StaticSize> StaticSize
    for ObjectPointer<C, D, I, G>
{
    fn size() -> usize {
        // Sum of all serialized fields; the trailing 4 presumably accounts for
        // the `size: Block<u32>` field (a `u32`) — confirm against the
        // serialization format.
        C::size() + D::size() + I::size() + G::size() + <DiskOffset as StaticSize>::size() + 4
    }
}
// A raw pointer always denotes a clean, on-disk object.
impl<C, D, I, G: Copy> From<ObjectPointer<C, D, I, G>> for ObjectRef<ObjectPointer<C, D, I, G>> {
    fn from(ptr: ObjectPointer<C, D, I, G>) -> Self {
        ObjectRef::Unmodified(ptr)
    }
}
impl<C, D, I, G: Copy> ObjectPointer<C, D, I, G> {
    /// The on-disk location of the object.
    pub fn offset(&self) -> DiskOffset {
        self.offset
    }
    /// The size of the stored (compressed) data in blocks.
    pub fn size(&self) -> Block<u32> {
        self.size
    }
    /// The generation in which the object was written.
    pub fn generation(&self) -> G {
        self.generation
    }
}
/// The Data Management Unit.
pub struct Dmu<C: 'static, E: 'static, SPL: StoragePoolLayer, H: 'static, I: 'static, G: 'static> {
    /// Compression applied to objects on write back.
    default_compression: C,
    /// Builder for checksums computed over the compressed payload.
    default_checksum_builder: <SPL::Checksum as Checksum>::Builder,
    pool: SPL,
    /// Object cache, keyed by [`ObjectKey`].
    cache: RwLock<E>,
    /// Pointers of objects whose write back finished but whose `ObjectRef`s
    /// have not been fixed up yet (consumed by `fix_or`).
    written_back: Mutex<HashMap<ModifiedObjectId, ObjectPointer<C, SPL::Checksum, I, G>>>,
    /// Handler info recorded when an object becomes modified; taken on write back.
    modified_info: Mutex<HashMap<ModifiedObjectId, I>>,
    handler: H,
    /// Per-disk allocator state (current segment and its bitmap), lazily initialized.
    allocation_data: Box<[Mutex<Option<(SegmentId, SegmentAllocator)>>]>,
    /// Source of fresh `ModifiedObjectId`s.
    next_modified_node_id: AtomicU64,
    /// Round-robin cursor for picking the starting disk on allocation.
    next_disk_id: AtomicU64,
}
impl<C, E, SPL, H> Dmu<C, E, SPL, H, H::Info, H::Generation>
where
    SPL: StoragePoolLayer,
    H: HandlerTypes,
{
    /// Returns a new `Dmu`.
    pub fn new(
        default_compression: C,
        default_checksum_builder: <SPL::Checksum as Checksum>::Builder,
        pool: SPL,
        cache: E,
        handler: H,
    ) -> Self {
        // One allocator slot per disk, lazily populated on first allocation.
        let allocation_data = (0..pool.disk_count())
            .map(|_| Mutex::new(None))
            .collect::<Vec<_>>()
            .into_boxed_slice();
        Dmu {
            default_compression,
            default_checksum_builder,
            pool,
            cache: RwLock::new(cache),
            written_back: Mutex::new(HashMap::new()),
            modified_info: Mutex::new(HashMap::new()),
            handler,
            allocation_data,
            next_modified_node_id: AtomicU64::new(1),
            next_disk_id: AtomicU64::new(0),
        }
    }
    /// Returns the underlying handler.
    pub fn handler(&self) -> &H {
        &self.handler
    }
    /// Returns the underlying cache.
    pub fn cache(&self) -> &RwLock<E> {
        &self.cache
    }
    /// Returns the underlying storage pool.
    pub fn pool(&self) -> &SPL {
        &self.pool
    }
}
// Associated types wiring the DMU into the generic DML interface.
impl<C, E, SPL, H> DmlBase for Dmu<C, E, SPL, H, H::Info, H::Generation>
where
    C: Compression + StaticSize,
    E: Cache<Key = ObjectKey<H::Generation>>,
    SPL: StoragePoolLayer,
    SPL::Checksum: StaticSize,
    H: HandlerTypes,
{
    type ObjectRef = ObjectRef<Self::ObjectPointer>;
    type ObjectPointer = ObjectPointer<C, SPL::Checksum, H::Info, H::Generation>;
    type Info = H::Info;
}
// Internal machinery: cache access, eviction, write back, and allocation.
impl<C, E, SPL, H, I, G> Dmu<C, E, SPL, H, I, G>
where
    C: Compression + StaticSize,
    E: Cache<Key = ObjectKey<G>, Value = RwLock<H::Object>>,
    SPL: StoragePoolLayer,
    SPL::Checksum: StaticSize,
    H: Handler<ObjectRef<ObjectPointer<C, SPL::Checksum, I, G>>, Info = I, Generation = G>,
    H::Object: Object<ObjectRef<ObjectPointer<C, SPL::Checksum, I, G>>>,
    I: PodType,
    G: PodType,
{
    /// Takes over a cached object for modification: rekeys its cache entry to
    /// `Modified`, records `info`, and returns a write handle.
    /// Returns `Ok(None)` if the object is not present in the cache.
    fn steal(
        &self,
        or: &mut <Self as DmlBase>::ObjectRef,
        info: H::Info,
    ) -> Result<Option<<Self as super::HandlerDml>::CacheValueRefMut>, Error> {
        let mid = self.next_modified_node_id.fetch_add(1, Ordering::Relaxed);
        let mid = ModifiedObjectId(mid);
        let entry = {
            let mut cache = self.cache.write();
            let was_present = cache.force_change_key(&or.as_key(), ObjectKey::Modified(mid));
            if !was_present {
                return Ok(None);
            }
            self.modified_info.lock().insert(mid, info);
            cache.get(&ObjectKey::Modified(mid), false).unwrap()
        };
        let obj = CacheValueRef::write(entry);
        // If the object was clean, its on-disk copy is now superseded; notify
        // the handler so the old space can be reclaimed (copy on write).
        if let ObjectRef::Unmodified(ptr) = replace(or, ObjectRef::Modified(mid)) {
            let actual_size = self.pool.actual_size(ptr.offset.disk_id() as u16, ptr.size);
            self.handler
                .copy_on_write(ptr.offset, actual_size, ptr.generation, ptr.info);
        }
        Ok(Some(obj))
    }
    /// Will be called when `or` is not in cache but was modified.
    /// Resolves two cases:
    /// - Previous write back (`Modified(_)`) Will change `or` to
    ///   `InWriteback(_)`.
    /// - Previous eviction after write back (`InWriteback(_)`) Will change
    ///   `or` to `Unmodified(_)`.
    fn fix_or(&self, or: &mut <Self as DmlBase>::ObjectRef) {
        match *or {
            ObjectRef::Unmodified(_) => unreachable!(),
            ObjectRef::Modified(mid) => {
                *or = ObjectRef::InWriteback(mid);
            }
            ObjectRef::InWriteback(mid) => {
                // The object must have been written back recently.
                let ptr = self.written_back.lock().remove(&mid).unwrap();
                *or = ObjectRef::Unmodified(ptr);
            }
        }
    }
    /// Fetches an object from disk and inserts it into the cache.
    ///
    /// NOTE: despite the name of its async sibling, this read is performed
    /// synchronously via `SPL::read`.
    fn fetch(&self, op: &<Self as DmlBase>::ObjectPointer) -> Result<(), Error> {
        let compression = op.compression.clone();
        let offset = op.offset;
        let generation = op.generation;
        let compressed_data = self.pool.read(op.size, op.offset, op.checksum.clone())?;
        let object: H::Object = {
            let data = compression
                .decompress(compressed_data)
                .chain_err(|| ErrorKind::DecompressionError)?;
            Object::unpack(data).chain_err(|| ErrorKind::DeserializationError)?
        };
        let key = ObjectKey::Unmodified { offset, generation };
        self.insert_object_into_cache(key, RwLock::new(object));
        Ok(())
    }
    /// Starts an asynchronous read of an object's compressed on-disk data.
    /// The returned future yields the pointer together with the raw bytes;
    /// decompression and cache insertion are left to the caller.
    fn try_fetch_async(
        &self,
        op: &<Self as DmlBase>::ObjectPointer,
    ) -> Result<
        impl TryFuture<
            Ok = (
                ObjectPointer<C, <SPL as StoragePoolLayer>::Checksum, I, G>,
                Box<[u8]>,
            ),
            Error = Error,
        > + Send,
        Error,
    > {
        let ptr = op.clone();
        Ok(self
            .pool
            .read_async(op.size, op.offset, op.checksum.clone())?
            .map_err(Error::from)
            .and_then(move |data| ok((ptr, data))))
    }
    /// Inserts `object` under `key` unless an entry for `key` already exists.
    fn insert_object_into_cache(&self, key: ObjectKey<H::Generation>, mut object: E::Value) {
        let size = object.get_mut().size();
        let mut cache = self.cache.write();
        if !cache.contains_key(&key) {
            cache.insert(key, object, size);
        }
    }
    /// Evicts at most one object from the cache, writing it back first when it
    /// is dirty. Consumes the cache write guard.
    fn evict(&self, mut cache: RwLockWriteGuard<E>) -> Result<(), Error> {
        // TODO we may need to evict multiple objects
        // Algorithm overview:
        // Find some evictable object
        // - unpinned
        // - not in writeback state
        // - can_be_evicted
        // If it's `Unmodified` -> done
        // Change its key (`Modified`) to `InWriteback`
        // Pin object
        // Unlock cache
        // Serialize, compress, checksum
        // Fetch generation
        // Allocate
        // Write out
        // Try to remove from cache
        // If ok -> done
        // If this fails, call copy_on_write as object has been modified again
        let evict_result = cache.evict(|&key, entry, cache_contains_key| {
            let object = entry.get_mut();
            // A dirty object may only be evicted once none of its children are
            // still dirty (present in the cache).
            let can_be_evicted = match key {
                ObjectKey::InWriteback(_) => false,
                ObjectKey::Unmodified { .. } => true,
                ObjectKey::Modified(_) => object
                    .for_each_child(|or| {
                        let is_unmodified = loop {
                            if let ObjectRef::Unmodified(_) = *or {
                                break true;
                            }
                            if cache_contains_key(&or.as_key()) {
                                break false;
                            }
                            self.fix_or(or);
                        };
                        if is_unmodified {
                            Ok(())
                        } else {
                            Err(())
                        }
                    })
                    .is_ok(),
            };
            if can_be_evicted {
                Some(object.size())
            } else {
                None
            }
        });
        let (key, mut object) = match evict_result {
            None => return Ok(()),
            Some((key, object)) => (key, object),
        };
        let mid = match key {
            ObjectKey::InWriteback(_) => unreachable!(),
            ObjectKey::Unmodified { .. } => return Ok(()),
            ObjectKey::Modified(mid) => mid,
        };
        // Rekey to `InWriteback` so concurrent lookups see the transition, then
        // release the cache lock before doing the (slow) write back.
        let size = object.get_mut().size();
        cache.insert(ObjectKey::InWriteback(mid), object, size);
        let entry = cache.get(&ObjectKey::InWriteback(mid), false).unwrap();
        drop(cache);
        let object = CacheValueRef::read(entry);
        self.handle_write_back(object, mid, true)?;
        Ok(())
    }
    /// Packs, compresses, checksums, and writes `object` to a freshly
    /// allocated on-disk location. On success the cache entry keyed
    /// `InWriteback(mid)` is either removed (`evict == true`) or rekeyed to
    /// `Unmodified`, and the new pointer is published via `written_back` for
    /// `fix_or` to pick up.
    fn handle_write_back(
        &self,
        object: <Self as super::HandlerDml>::CacheValueRef,
        mid: ModifiedObjectId,
        evict: bool,
    ) -> Result<<Self as DmlBase>::ObjectPointer, Error> {
        let object_size = super::Size::size(&*object);
        if object_size > 4 * 1024 * 1024 {
            warn!("Writing back large object: {}", object.debug_info());
        }
        let compression = self.default_compression.clone();
        let generation = self.handler.current_generation();
        let mut compressed_data = {
            let mut compress = compression.compress();
            {
                object
                    .pack(&mut compress)
                    .chain_err(|| ErrorKind::SerializationError)?;
                drop(object);
            }
            compress.finish()
        };
        let info = self.modified_info.lock().remove(&mid).unwrap();
        assert!(compressed_data.len() <= u32::max_value() as usize);
        // Round the payload size up to whole blocks.
        let size = compressed_data.len();
        let size = Block(((size as usize + BLOCK_SIZE - 1) / BLOCK_SIZE) as u32);
        assert!(size.to_bytes() as usize >= compressed_data.len());
        let offset = self.allocate(size)?;
        // Zero-pad the payload to the allocated block size before writing.
        if size.to_bytes() as usize != compressed_data.len() {
            let mut v = compressed_data.into_vec();
            v.resize(size.to_bytes() as usize, 0);
            compressed_data = v.into_boxed_slice();
        }
        let checksum = {
            let mut state = self.default_checksum_builder.build();
            state.ingest(&compressed_data);
            state.finish()
        };
        self.pool.begin_write(compressed_data, offset)?;
        let obj_ptr = ObjectPointer {
            offset,
            size,
            checksum,
            compression,
            generation,
            info,
        };
        let was_present;
        {
            let mut cache = self.cache.write();
            // We can safely ignore pins.
            // If it's pinned, it must be a readonly request.
            was_present = if evict {
                cache.force_remove(&ObjectKey::InWriteback(mid), object_size)
            } else {
                cache.force_change_key(
                    &ObjectKey::InWriteback(mid),
                    ObjectKey::Unmodified {
                        offset: obj_ptr.offset,
                        generation: obj_ptr.generation,
                    },
                )
            };
            if was_present {
                self.written_back.lock().insert(mid, obj_ptr.clone());
            }
        }
        if !was_present {
            // The object has been `stolen`. Notify the handler.
            let actual_size = self
                .pool
                .actual_size(obj_ptr.offset.disk_id() as u16, obj_ptr.size);
            self.handler.copy_on_write(
                obj_ptr.offset,
                actual_size,
                obj_ptr.generation,
                obj_ptr.info,
            );
        }
        Ok(obj_ptr)
    }
    /// Allocates `size` blocks, preferring the disk with the most effective
    /// free space (starting the scan round-robin) and walking that disk's
    /// segments until one has room.
    fn allocate(&self, size: Block<u32>) -> Result<DiskOffset, Error> {
        if size >= Block(2048) {
            warn!("Very large allocation requested: {:?}", size);
        }
        let start_disk_id = (self.next_disk_id.fetch_add(1, Ordering::Relaxed)
            % u64::from(self.pool.disk_count())) as u16;
        let disk_id = (start_disk_id..self.pool.disk_count())
            .chain(0..start_disk_id)
            .max_by_key(|&disk_id| {
                self.pool
                    .effective_free_size(disk_id, self.handler.get_free_space(disk_id))
            })
            .unwrap();
        let size = self.pool.actual_size(disk_id, size);
        let disk_size = self.pool.size_in_blocks(disk_id);
        let disk_offset = {
            let mut x = self.allocation_data[disk_id as usize].lock();
            // Lazily load the segment allocator for this disk on first use.
            if x.is_none() {
                let segment_id = SegmentId::get(DiskOffset::new(disk_id as usize, Block(0)));
                let allocator = self
                    .handler
                    .get_allocation_bitmap(segment_id, self)
                    .chain_err(|| ErrorKind::HandlerError)?;
                *x = Some((segment_id, allocator));
            }
            let &mut (ref mut segment_id, ref mut allocator) = x.as_mut().unwrap();
            let first_seen_segment_id = *segment_id;
            loop {
                if let Some(segment_offset) = allocator.allocate(size.as_u32()) {
                    break segment_id.disk_offset(segment_offset);
                }
                let next_segment_id = segment_id.next(disk_size);
                trace!(
                    "Next allocator segment: {:?} -> {:?} ({:?})",
                    segment_id,
                    next_segment_id,
                    disk_size,
                );
                // Wrapped around to the segment we started with: disk is full.
                if next_segment_id == first_seen_segment_id {
                    bail!(ErrorKind::OutOfSpaceError);
                }
                *allocator = self
                    .handler
                    .get_allocation_bitmap(next_segment_id, self)
                    .chain_err(|| ErrorKind::HandlerError)?;
                *segment_id = next_segment_id;
            }
        };
        info!("Allocated {:?} at {:?}", size, disk_offset);
        self.handler
            .update_allocation_bitmap(disk_offset, size, Action::Allocate, self)
            .chain_err(|| ErrorKind::HandlerError)?;
        Ok(disk_offset)
    }
    /// Tries to allocate `size` blocks at `disk_offset`. Might fail if
    /// already in use.
    pub fn allocate_raw_at(&self, disk_offset: DiskOffset, size: Block<u32>) -> Result<(), Error> {
        let disk_id = disk_offset.disk_id();
        let num_disks = self.pool.num_disks(disk_id as u16);
        let size = size * num_disks as u32;
        let segment_id = SegmentId::get(disk_offset);
        let mut x = self.allocation_data[disk_id as usize].lock();
        let mut allocator = self
            .handler
            .get_allocation_bitmap(segment_id, self)
            .chain_err(|| ErrorKind::HandlerError)?;
        if allocator.allocate_at(size.as_u32(), SegmentId::get_block_offset(disk_offset)) {
            *x = Some((segment_id, allocator));
            self.handler
                .update_allocation_bitmap(disk_offset, size, Action::Allocate, self)
                .chain_err(|| ErrorKind::HandlerError)?;
            Ok(())
        } else {
            bail!("Cannot allocate raw at {:?} / {:?}", disk_offset, size)
        }
    }
    /// Transitions a modified object into the `InWriteback` state.
    ///
    /// Fails (`Err(())`) when the object still has modified children; their
    /// ids are appended to `dep_mids` so the caller can write those back
    /// first. Returns `Ok(None)` when the object is no longer cached as
    /// modified. Spins (with `yield_now`) while a previous write back of the
    /// same object is in flight or the entry is pinned.
    fn prepare_write_back(
        &self,
        mid: ModifiedObjectId,
        dep_mids: &mut Vec<ModifiedObjectId>,
    ) -> Result<Option<<Self as super::HandlerDml>::CacheValueRef>, ()> {
        loop {
            let mut cache = self.cache.write();
            if cache.contains_key(&ObjectKey::InWriteback(mid)) {
                // TODO wait
                drop(cache);
                yield_now();
                continue;
            }
            let result =
                cache.change_key(&ObjectKey::Modified(mid), |_, entry, cache_contains_key| {
                    let object = entry.get_mut();
                    let mut modified_children = false;
                    object
                        .for_each_child::<(), _>(|or| loop {
                            let mid = match *or {
                                ObjectRef::Unmodified(_) => break Ok(()),
                                ObjectRef::InWriteback(mid) | ObjectRef::Modified(mid) => mid,
                            };
                            if cache_contains_key(&or.as_key()) {
                                modified_children = true;
                                dep_mids.push(mid);
                                break Ok(());
                            }
                            self.fix_or(or);
                        })
                        .unwrap();
                    if modified_children {
                        Err(())
                    } else {
                        Ok(ObjectKey::InWriteback(mid))
                    }
                });
            return match result {
                Ok(()) => Ok(Some(
                    cache
                        .get(&ObjectKey::InWriteback(mid), false)
                        .map(CacheValueRef::read)
                        .unwrap(),
                )),
                Err(ChangeKeyError::NotPresent) => Ok(None),
                Err(ChangeKeyError::Pinned) => {
                    // TODO wait
                    warn!("Pinned node");
                    drop(cache);
                    yield_now();
                    continue;
                }
                Err(ChangeKeyError::CallbackError(())) => Err(()),
            };
        }
    }
}
/// Cache-level DML operations for the data management unit (DMU):
/// lookup, mutable access, insertion, removal and eviction of objects.
impl<C, E, SPL, H, I, G> super::HandlerDml for Dmu<C, E, SPL, H, I, G>
where
    C: Compression + StaticSize,
    E: Cache<Key = ObjectKey<G>, Value = RwLock<H::Object>>,
    SPL: StoragePoolLayer,
    SPL::Checksum: StaticSize,
    H: Handler<ObjectRef<ObjectPointer<C, SPL::Checksum, I, G>>, Info = I, Generation = G>,
    H::Object: Object<<Self as DmlBase>::ObjectRef>,
    I: PodType,
    G: PodType,
{
    type Object = H::Object;
    type CacheValueRef = CacheValueRef<E::ValueRef, RwLockReadGuard<'static, H::Object>>;
    type CacheValueRefMut = CacheValueRef<E::ValueRef, RwLockWriteGuard<'static, H::Object>>;
    /// Returns a read handle for `or` if the object is already cached;
    /// never fetches from storage.
    fn try_get(&self, or: &Self::ObjectRef) -> Option<Self::CacheValueRef> {
        let result = {
            // Drop order important: the cache read lock must be released
            // before the returned guard is handed to the caller.
            let cache = self.cache.read();
            // NOTE(review): second argument to `Cache::get` is presumably a
            // hit-accounting/pin flag — confirm against the Cache impl.
            cache.get(&or.as_key(), false)
        };
        result.map(CacheValueRef::read)
    }
    /// Returns a write handle only for objects already in the `Modified`
    /// state; unmodified or in-writeback objects must be stolen first
    /// (see `get_mut`).
    fn try_get_mut(&self, or: &Self::ObjectRef) -> Option<Self::CacheValueRefMut> {
        if let ObjectRef::Modified(_) = *or {
            let result = {
                let cache = self.cache.read();
                cache.get(&or.as_key(), true)
            };
            result.map(CacheValueRef::write)
        } else {
            None
        }
    }
    /// Returns a read handle for `or`, fetching the object from storage if
    /// it is an `Unmodified` reference not present in the cache.
    /// Loops because the object's cache key may change concurrently
    /// (writeback state transitions); `fix_or` refreshes `or` in that case.
    fn get(&self, or: &mut Self::ObjectRef) -> Result<Self::CacheValueRef, Error> {
        let mut cache = self.cache.read();
        loop {
            if let Some(entry) = cache.get(&or.as_key(), true) {
                drop(cache);
                return Ok(CacheValueRef::read(entry));
            }
            if let ObjectRef::Unmodified(ref ptr) = *or {
                // Cache miss on on-disk data: release the lock, fetch,
                // then retry the lookup.
                drop(cache);
                self.fetch(ptr)?;
                cache = self.cache.read();
            } else {
                self.fix_or(or);
            }
        }
    }
    /// Returns a write handle, transitioning the object into the
    /// `Modified` state ("stealing") if necessary and fetching it first
    /// when it is not cached at all.
    fn get_mut(
        &self,
        or: &mut Self::ObjectRef,
        info: Self::Info,
    ) -> Result<Self::CacheValueRefMut, Error> {
        // Fast path: already Modified and cached.
        if let Some(obj) = self.try_get_mut(or) {
            return Ok(obj);
        }
        // Object either not mutable or not present.
        loop {
            // Try to steal it if present.
            if let Some(obj) = self.steal(or, info)? {
                return Ok(obj);
            }
            // Fetch it.
            self.get(or)?;
        }
    }
    /// Inserts a freshly created object into the cache under a new
    /// `ModifiedObjectId` and returns a reference to it.
    fn insert(&self, mut object: Self::Object, info: H::Info) -> Self::ObjectRef {
        // Relaxed is sufficient: only uniqueness of the id matters.
        let mid = ModifiedObjectId(self.next_modified_node_id.fetch_add(1, Ordering::Relaxed));
        self.modified_info.lock().insert(mid, info);
        let key = ObjectKey::Modified(mid);
        let size = object.size();
        self.cache.write().insert(key, RwLock::new(object), size);
        ObjectRef::Modified(mid)
    }
    /// Like `insert`, but additionally returns a write handle to the newly
    /// inserted object (looked up while still holding the cache lock).
    fn insert_and_get_mut(
        &self,
        mut object: Self::Object,
        info: Self::Info,
    ) -> (Self::CacheValueRefMut, Self::ObjectRef) {
        let mid = ModifiedObjectId(self.next_modified_node_id.fetch_add(1, Ordering::Relaxed));
        self.modified_info.lock().insert(mid, info);
        let key = ObjectKey::Modified(mid);
        let size = object.size();
        let entry = {
            let mut cache = self.cache.write();
            cache.insert(key, RwLock::new(object), size);
            cache.get(&key, false).unwrap()
        };
        (CacheValueRef::write(entry), ObjectRef::Modified(mid))
    }
    /// Drops an object from the cache. For on-disk (`Unmodified`) objects
    /// the handler is notified so the occupied space can be reclaimed
    /// (copy-on-write accounting).
    fn remove(&self, or: Self::ObjectRef) {
        match self.cache.write().remove(&or.as_key(), |obj| obj.size()) {
            Ok(_) | Err(RemoveError::NotPresent) => {}
            // TODO
            Err(RemoveError::Pinned) => unimplemented!(),
        };
        if let ObjectRef::Unmodified(ref ptr) = or {
            let actual_size = self.pool.actual_size(ptr.offset.disk_id() as u16, ptr.size);
            self.handler
                .copy_on_write(ptr.offset, actual_size, ptr.generation, ptr.info);
        }
    }
    /// Fetches the object if needed, removes it from the cache and returns
    /// it by value; on-disk space is released like in `remove`.
    fn get_and_remove(&self, mut or: Self::ObjectRef) -> Result<H::Object, Error> {
        let obj = loop {
            self.get(&mut or)?;
            match self.cache.write().remove(&or.as_key(), |obj| obj.size()) {
                Ok(obj) => break obj,
                // Raced with eviction between `get` and `remove`: retry.
                Err(RemoveError::NotPresent) => {}
                // TODO
                Err(RemoveError::Pinned) => unimplemented!(),
            };
        };
        if let ObjectRef::Unmodified(ref ptr) = or {
            let actual_size = self.pool.actual_size(ptr.offset.disk_id() as u16, ptr.size);
            self.handler
                .copy_on_write(ptr.offset, actual_size, ptr.generation, ptr.info);
        }
        Ok(obj.into_inner())
    }
    /// Wraps a raw on-disk pointer into an (Unmodified) object reference.
    fn ref_from_ptr(r: Self::ObjectPointer) -> Self::ObjectRef {
        r.into()
    }
    /// Evicts entries if the cache exceeds its capacity. The inner
    /// `self.evict(cache)` call is a distinct inherent method that takes
    /// the already-acquired write guard.
    fn evict(&self) -> Result<(), Error> {
        // TODO shortcut without locking cache
        let cache = self.cache.write();
        if cache.size() > cache.capacity() {
            self.evict(cache)?;
        }
        Ok(())
    }
}
/// Higher-level DML operations: write-back to storage, prefetching and
/// cache teardown.
impl<C, E, SPL, H, I, G> super::Dml for Dmu<C, E, SPL, H, I, G>
where
    C: Compression + StaticSize,
    E: Cache<Key = ObjectKey<G>, Value = RwLock<H::Object>>,
    SPL: StoragePoolLayer,
    SPL::Checksum: StaticSize,
    H: Handler<ObjectRef<ObjectPointer<C, SPL::Checksum, I, G>>, Info = I, Generation = G>,
    H::Object: Object<<Self as DmlBase>::ObjectRef>,
    I: PodType,
    G: PodType,
{
    /// Writes the referenced object (and, first, any still-modified
    /// children discovered by `prepare_write_back`) back to storage and
    /// returns its resulting on-disk pointer.
    ///
    /// `acquire_or_lock` re-acquires the lock on the object reference on
    /// every retry so other threads can make progress in between.
    fn write_back<F, FO>(&self, mut acquire_or_lock: F) -> Result<Self::ObjectPointer, Error>
    where
        F: FnMut() -> FO,
        FO: DerefMut<Target = Self::ObjectRef>,
    {
        let (object, mid) = loop {
            let mut or = acquire_or_lock();
            let mid = match *or {
                // Already on disk — nothing to do.
                ObjectRef::Unmodified(ref p) => return Ok(p.clone()),
                ObjectRef::InWriteback(mid) | ObjectRef::Modified(mid) => mid,
            };
            let mut mids = Vec::new();
            match self.prepare_write_back(mid, &mut mids) {
                Ok(None) => self.fix_or(&mut or),
                Ok(Some(object)) => break (object, mid),
                Err(()) => {
                    // `mids` holds modified descendants that must be
                    // written back first, deepest last. On Err(()) the
                    // same mid is retried (no pop).
                    drop(or);
                    while let Some(&mid) = mids.last() {
                        match self.prepare_write_back(mid, &mut mids) {
                            Ok(None) => {}
                            Ok(Some(object)) => {
                                self.handle_write_back(object, mid, false)?;
                            }
                            Err(()) => continue,
                        };
                        mids.pop();
                    }
                }
            }
        };
        self.handle_write_back(object, mid, false)
    }
    type Prefetch = Pin<
        Box<
            dyn Future<Output = Result<(<Self as DmlBase>::ObjectPointer, Box<[u8]>), Error>>
                + Send
                + 'static,
        >,
    >;
    /// Starts an asynchronous fetch of the (still compressed) on-disk data
    /// for `or`. Returns `None` when the object is cached or not on disk.
    fn prefetch(&self, or: &Self::ObjectRef) -> Result<Option<Self::Prefetch>, Error> {
        if self.cache.read().contains_key(&or.as_key()) {
            return Ok(None);
        }
        Ok(match *or {
            ObjectRef::Modified(_) | ObjectRef::InWriteback(_) => None,
            ObjectRef::Unmodified(ref p) => Some(Box::pin(self.try_fetch_async(p)?.into_future())),
        })
    }
    /// Completes a prefetch: blocks on the future, decompresses and
    /// deserializes the payload, then inserts the object into the cache.
    fn finish_prefetch(&self, p: Self::Prefetch) -> Result<(), Error> {
        let (ptr, compressed_data) = block_on(p)?;
        let object: H::Object = {
            let data = ptr
                .compression
                .decompress(compressed_data)
                .chain_err(|| ErrorKind::DecompressionError)?;
            Object::unpack(data).chain_err(|| ErrorKind::DeserializationError)?
        };
        let key = ObjectKey::Unmodified {
            offset: ptr.offset,
            generation: ptr.generation,
        };
        self.insert_object_into_cache(key, RwLock::new(object));
        Ok(())
    }
    /// Drops all clean (`Unmodified`) entries from the cache; modified and
    /// in-writeback entries are kept since they only exist in memory.
    fn drop_cache(&self) {
        let mut cache = self.cache.write();
        // Collect keys first: removal while iterating would invalidate the
        // cache iterator.
        let keys: Vec<_> = cache
            .iter()
            .cloned()
            .filter(|&key| {
                if let ObjectKey::Unmodified { .. } = key {
                    true
                } else {
                    false
                }
            })
            .collect();
        for key in keys {
            // Pinned entries are deliberately left in place.
            let _ = cache.remove(&key, |obj| obj.size());
        }
    }
}
pub struct CacheValueRef<T, U> {
head: T,
guard: ManuallyDrop<U>,
}
impl<T, U> Drop for CacheValueRef<T, U> {
    fn drop(&mut self) {
        // SAFETY: `guard` borrows from `head` with its lifetime transmuted
        // to 'static, so it must be released while `head` is still alive.
        // Dropping it here — before the automatic field drops run — does
        // exactly that, and it is never accessed again afterwards.
        unsafe {
            ManuallyDrop::drop(&mut self.guard);
        }
    }
}
/// Size-accounting changes are forwarded to the underlying cache entry.
impl<T: AddSize, U> AddSize for CacheValueRef<T, U> {
    fn add_size(&self, size_delta: isize) {
        self.head.add_size(size_delta)
    }
}
impl<T, U> CacheValueRef<T, RwLockReadGuard<'static, U>>
where
    T: StableDeref<Target = RwLock<U>>,
{
    /// Acquires a read lock on the RwLock behind `head` and bundles guard
    /// and handle into a single owning value.
    fn read(head: T) -> Self {
        // SAFETY: the guard's lifetime is extended to 'static. This is
        // sound because `T: StableDeref` guarantees the RwLock's address
        // stays stable when `head` is moved, and the Drop impl above
        // releases the guard before `head` is dropped.
        let guard = unsafe { transmute(RwLock::read(&head)) };
        CacheValueRef {
            head,
            guard: ManuallyDrop::new(guard),
        }
    }
}
impl<T, U> CacheValueRef<T, RwLockWriteGuard<'static, U>>
where
    T: StableDeref<Target = RwLock<U>>,
{
    /// Acquires a write lock on the RwLock behind `head`; see `read` for
    /// the lifetime-extension rationale.
    fn write(head: T) -> Self {
        // SAFETY: same argument as in `read` — StableDeref pins the
        // RwLock's address and Drop releases the guard before `head`.
        let guard = unsafe { transmute(RwLock::write(&head)) };
        CacheValueRef {
            head,
            guard: ManuallyDrop::new(guard),
        }
    }
}
// SAFETY: deref goes through `guard`, which points into the heap-stable
// RwLock behind `head`; moving the CacheValueRef does not move the target.
unsafe impl<T, U> StableDeref for CacheValueRef<T, RwLockReadGuard<'static, U>> {}
impl<T, U> Deref for CacheValueRef<T, RwLockReadGuard<'static, U>> {
    type Target = U;
    fn deref(&self) -> &U {
        &*self.guard
    }
}
// SAFETY: same as the read variant — the target lives behind the stable
// RwLock referenced by `head`, not inside this struct.
unsafe impl<T, U> StableDeref for CacheValueRef<T, RwLockWriteGuard<'static, U>> {}
impl<T, U> Deref for CacheValueRef<T, RwLockWriteGuard<'static, U>> {
    type Target = U;
    fn deref(&self) -> &U {
        &*self.guard
    }
}
impl<T, U> DerefMut for CacheValueRef<T, RwLockWriteGuard<'static, U>> {
    fn deref_mut(&mut self) -> &mut U {
        &mut *self.guard
    }
}
| 33.971092 | 101 | 0.524252 |
48024e18f3d0b877b208a408c90e2180928e94f8
| 512 |
// Crate-wide lint suppressions. NOTE(review): `unused_must_use` and
// `unreachable_code` are silenced for the whole crate — consider scoping
// these allows to the items that need them.
#![allow(unused_must_use)]
#![allow(unreachable_code)]
// Public module surface of the SQS executor crate.
pub mod cache;
pub mod completion_event_serializer;
pub mod completion_handler;
pub mod consumer;
pub mod error;
pub mod event_decoder;
pub mod event_emitter;
pub mod event_handler;
pub mod event_processor;
pub mod event_retriever;
pub mod local_sqs_service;
pub mod local_sqs_service_options;
pub mod redis_cache;
pub mod retry;
pub mod s3_event_emitter;
pub mod service_builder;
pub mod sqs_completion_handler;
pub mod sqs_consumer;
pub mod sqs_service;
| 22.26087 | 36 | 0.822266 |
f5616e6231e0ecbb48a79bdc7f7829673e201bcc
| 11,179 |
use errors::prelude::*;
use rusqlite::types::ToSql;
use services::wallet::language::{Operator, TagName, TargetValue};
// Translates Wallet Query Language to SQL
// WQL input is provided as a reference to a top level Operator
// Result is a tuple of query string and query arguments
/// Translates a WQL operator tree into a full `SELECT` over the `items`
/// table, restricted to the given record `class` (item type).
///
/// Returns the query text and the positional bind arguments, with `class`
/// always bound first (for the `i.type = ?` placeholder). `_options` is
/// currently unused.
pub fn wql_to_sql<'a>(class: &'a Vec<u8>, op: &'a Operator, _options: Option<&str>) -> Result<(String, Vec<&'a dyn ToSql>), IndyError> {
    const BASE: &str = "SELECT i.id, i.name, i.value, i.key, i.type FROM items as i WHERE i.type = ?";
    let mut args: Vec<&dyn ToSql> = vec![class];
    let clause = operator_to_sql(op, &mut args)?;
    let query = if clause.is_empty() {
        BASE.to_string()
    } else {
        // Reserve room for BASE + " AND " (5 chars) + the clause.
        let mut buf = String::with_capacity(BASE.len() + 5 + clause.len());
        buf.push_str(BASE);
        buf.push_str(" AND ");
        buf.push_str(&clause);
        buf
    };
    Ok((query, args))
}
/// Translates a WQL operator tree into a `SELECT count(*)` over the
/// `items` table for the given record `class`, returning the query text
/// and its positional bind arguments (`class` bound first).
pub fn wql_to_sql_count<'a>(class: &'a Vec<u8>, op: &'a Operator) -> Result<(String, Vec<&'a dyn ToSql>), IndyError> {
    let mut args: Vec<&dyn ToSql> = vec![class];
    let clause = operator_to_sql(op, &mut args)?;
    let mut query = "SELECT count(*) FROM items as i WHERE i.type = ?".to_string();
    // Append the WQL-derived filter only when the tree produced one.
    if !clause.is_empty() {
        query.push_str(" AND ");
        query.push_str(&clause);
    }
    Ok((query, args))
}
/// Dispatches one WQL operator node to the matching SQL translator.
/// Bind values are appended to `arguments` in the same order as the `?`
/// placeholders appear in the returned clause.
fn operator_to_sql<'a>(op: &'a Operator, arguments: &mut Vec<&'a dyn ToSql>) -> IndyResult<String> {
    match *op {
        Operator::Eq(ref tag_name, ref target_value) => eq_to_sql(tag_name, target_value, arguments),
        Operator::Neq(ref tag_name, ref target_value) => neq_to_sql(tag_name, target_value, arguments),
        Operator::Gt(ref tag_name, ref target_value) => gt_to_sql(tag_name, target_value, arguments),
        Operator::Gte(ref tag_name, ref target_value) => gte_to_sql(tag_name, target_value, arguments),
        Operator::Lt(ref tag_name, ref target_value) => lt_to_sql(tag_name, target_value, arguments),
        Operator::Lte(ref tag_name, ref target_value) => lte_to_sql(tag_name, target_value, arguments),
        Operator::Like(ref tag_name, ref target_value) => like_to_sql(tag_name, target_value, arguments),
        Operator::In(ref tag_name, ref target_values) => in_to_sql(tag_name, target_values, arguments),
        Operator::And(ref suboperators) => and_to_sql(suboperators, arguments),
        Operator::Or(ref suboperators) => or_to_sql(suboperators, arguments),
        Operator::Not(ref suboperator) => not_to_sql(suboperator, arguments),
    }
}
/// Renders an equality comparison as a subquery clause over the matching
/// tags table and appends name and value to the bind `arguments`.
/// Encrypted names must pair with encrypted values, plaintext names with
/// unencrypted values; any other pairing is a wallet query error.
fn eq_to_sql<'a>(name: &'a TagName, value: &'a TargetValue, arguments: &mut Vec<&'a dyn ToSql>) -> IndyResult<String> {
    match (name, value) {
        (&TagName::EncryptedTagName(ref tag), &TargetValue::Encrypted(ref tag_value)) => {
            arguments.push(tag);
            arguments.push(tag_value);
            Ok("(i.id in (SELECT item_id FROM tags_encrypted WHERE name = ? AND value = ?))".to_string())
        }
        (&TagName::PlainTagName(ref tag), &TargetValue::Unencrypted(ref tag_value)) => {
            arguments.push(tag);
            arguments.push(tag_value);
            Ok("(i.id in (SELECT item_id FROM tags_plaintext WHERE name = ? AND value = ?))".to_string())
        }
        _ => Err(err_msg(IndyErrorKind::WalletQueryError, "Invalid combination of tag name and value for equality operator"))
    }
}
/// Renders an inequality (`!=`) comparison as a subquery clause, pushing
/// name and value onto the bind `arguments`. Name/value encryption kinds
/// must match, mirroring `eq_to_sql`.
fn neq_to_sql<'a>(name: &'a TagName, value: &'a TargetValue, arguments: &mut Vec<&'a dyn ToSql>) -> IndyResult<String> {
    match (name, value) {
        (&TagName::EncryptedTagName(ref tag), &TargetValue::Encrypted(ref tag_value)) => {
            arguments.push(tag);
            arguments.push(tag_value);
            Ok("(i.id in (SELECT item_id FROM tags_encrypted WHERE name = ? AND value != ?))".to_string())
        }
        (&TagName::PlainTagName(ref tag), &TargetValue::Unencrypted(ref tag_value)) => {
            arguments.push(tag);
            arguments.push(tag_value);
            Ok("(i.id in (SELECT item_id FROM tags_plaintext WHERE name = ? AND value != ?))".to_string())
        }
        _ => Err(err_msg(IndyErrorKind::WalletQueryError, "Invalid combination of tag name and value for inequality operator"))
    }
}
/// Renders a `$gt` comparison. Ordering comparisons are only meaningful
/// for plaintext tags with unencrypted values.
fn gt_to_sql<'a>(name: &'a TagName, value: &'a TargetValue, arguments: &mut Vec<&'a dyn ToSql>) -> IndyResult<String> {
    if let (&TagName::PlainTagName(ref tag), &TargetValue::Unencrypted(ref tag_value)) = (name, value) {
        arguments.push(tag);
        arguments.push(tag_value);
        Ok("(i.id in (SELECT item_id FROM tags_plaintext WHERE name = ? AND value > ?))".to_string())
    } else {
        Err(err_msg(IndyErrorKind::WalletQueryError, "Invalid combination of tag name and value for $gt operator"))
    }
}
/// Renders a `$gte` comparison; restricted to plaintext tags like the
/// other ordering operators.
fn gte_to_sql<'a>(name: &'a TagName, value: &'a TargetValue, arguments: &mut Vec<&'a dyn ToSql>) -> IndyResult<String> {
    if let (&TagName::PlainTagName(ref tag), &TargetValue::Unencrypted(ref tag_value)) = (name, value) {
        arguments.push(tag);
        arguments.push(tag_value);
        Ok("(i.id in (SELECT item_id FROM tags_plaintext WHERE name = ? AND value >= ?))".to_string())
    } else {
        Err(err_msg(IndyErrorKind::WalletQueryError, "Invalid combination of tag name and value for $gte operator"))
    }
}
/// Renders a `$lt` comparison; restricted to plaintext tags.
fn lt_to_sql<'a>(name: &'a TagName, value: &'a TargetValue, arguments: &mut Vec<&'a dyn ToSql>) -> IndyResult<String> {
    if let (&TagName::PlainTagName(ref tag), &TargetValue::Unencrypted(ref tag_value)) = (name, value) {
        arguments.push(tag);
        arguments.push(tag_value);
        Ok("(i.id in (SELECT item_id FROM tags_plaintext WHERE name = ? AND value < ?))".to_string())
    } else {
        Err(err_msg(IndyErrorKind::WalletQueryError, "Invalid combination of tag name and value for $lt operator"))
    }
}
/// Renders a `$lte` comparison; restricted to plaintext tags.
fn lte_to_sql<'a>(name: &'a TagName, value: &'a TargetValue, arguments: &mut Vec<&'a dyn ToSql>) -> IndyResult<String> {
    if let (&TagName::PlainTagName(ref tag), &TargetValue::Unencrypted(ref tag_value)) = (name, value) {
        arguments.push(tag);
        arguments.push(tag_value);
        Ok("(i.id in (SELECT item_id FROM tags_plaintext WHERE name = ? AND value <= ?))".to_string())
    } else {
        Err(err_msg(IndyErrorKind::WalletQueryError, "Invalid combination of tag name and value for $lte operator"))
    }
}
/// Renders a `$like` pattern match; only plaintext values can be matched
/// with SQL `LIKE`.
fn like_to_sql<'a>(name: &'a TagName, value: &'a TargetValue, arguments: &mut Vec<&'a dyn ToSql>) -> IndyResult<String> {
    if let (&TagName::PlainTagName(ref tag), &TargetValue::Unencrypted(ref tag_value)) = (name, value) {
        arguments.push(tag);
        arguments.push(tag_value);
        Ok("(i.id in (SELECT item_id FROM tags_plaintext WHERE name = ? AND value LIKE ?))".to_string())
    } else {
        Err(err_msg(IndyErrorKind::WalletQueryError, "Invalid combination of tag name and value for $like operator"))
    }
}
/// Renders a `$in` membership test as `... value IN (?, ?, ...)` over the
/// tags table matching the name's encryption kind, appending the tag name
/// and every value to the bind `arguments`.
///
/// Every value must match the name's encryption kind (unencrypted values
/// for plaintext names, encrypted values for encrypted names); a mismatch
/// yields a wallet query error.
///
/// Fix: the encrypted branch previously computed `values.len() - 2`
/// unconditionally, which underflows `usize` for lists with fewer than two
/// values (panic in debug builds; malformed SQL like `IN (?,)` in release
/// builds). Both branches now use the overflow-safe `index + 1 < len`
/// check, so empty and single-element lists are handled uniformly.
fn in_to_sql<'a>(name: &'a TagName, values: &'a Vec<TargetValue>, arguments: &mut Vec<&'a dyn ToSql>) -> IndyResult<String> {
    let mut in_string = String::new();
    match *name {
        TagName::PlainTagName(ref queried_name) => {
            in_string.push_str("(i.id in (SELECT item_id FROM tags_plaintext WHERE name = ? AND value IN (");
            arguments.push(queried_name);
            for (index, value) in values.iter().enumerate() {
                if let TargetValue::Unencrypted(ref target) = *value {
                    in_string.push_str("?");
                    arguments.push(target);
                    // Separate placeholders with commas; no trailing comma.
                    if index + 1 < values.len() {
                        in_string.push(',');
                    }
                } else {
                    return Err(err_msg(IndyErrorKind::WalletQueryError, "Encrypted tag value in $in for nonencrypted tag name"))
                }
            }
            Ok(in_string + ")))")
        },
        TagName::EncryptedTagName(ref queried_name) => {
            in_string.push_str("(i.id in (SELECT item_id FROM tags_encrypted WHERE name = ? AND value IN (");
            arguments.push(queried_name);
            for (index, value) in values.iter().enumerate() {
                if let TargetValue::Encrypted(ref target) = *value {
                    in_string.push_str("?");
                    arguments.push(target);
                    // Separate placeholders with commas; no trailing comma.
                    if index + 1 < values.len() {
                        in_string.push(',');
                    }
                } else {
                    return Err(err_msg(IndyErrorKind::WalletQueryError, "Unencrypted tag value in $in for encrypted tag name"))
                }
            }
            Ok(in_string + ")))")
        },
    }
}
/// Joins the translated suboperators with ` AND ` inside parentheses.
fn and_to_sql<'a>(suboperators: &'a [Operator], arguments: &mut Vec<&'a dyn ToSql>) -> IndyResult<String> {
    join_operators(suboperators, " AND ", arguments)
}
/// Joins the translated suboperators with ` OR ` inside parentheses.
fn or_to_sql<'a>(suboperators: &'a [Operator], arguments: &mut Vec<&'a dyn ToSql>) -> IndyResult<String> {
    join_operators(suboperators, " OR ", arguments)
}
/// Negates a translated suboperator by wrapping it in `NOT (...)`.
fn not_to_sql<'a>(suboperator: &'a Operator, arguments: &mut Vec<&'a dyn ToSql>) -> IndyResult<String> {
    let inner = operator_to_sql(suboperator, arguments)?;
    Ok(format!("NOT ({})", inner))
}
/// Translates each operator and joins the results with `join_str`, the
/// whole group wrapped in parentheses. An empty operator list yields an
/// empty string (no clause).
fn join_operators<'a>(operators: &'a [Operator], join_str: &str, arguments: &mut Vec<&'a dyn ToSql>) -> IndyResult<String> {
    if operators.is_empty() {
        return Ok(String::new());
    }
    // Translate sequentially so bind arguments are pushed in clause order.
    let mut rendered = Vec::with_capacity(operators.len());
    for operator in operators {
        rendered.push(operator_to_sql(operator, arguments)?);
    }
    Ok(format!("({})", rendered.join(join_str)))
}
#[cfg(test)]
mod tests {
    use super::*;
    // Smoke test: a nested query — $or of two $and groups mixing encrypted
    // and plaintext tags, including a $not — translates without error.
    #[test]
    fn simple_and() {
        let condition_1 = Operator::And(vec![
            Operator::Eq(TagName::EncryptedTagName(vec![1,2,3]), TargetValue::Encrypted(vec![4,5,6])),
            Operator::Eq(TagName::PlainTagName(vec![7,8,9]), TargetValue::Unencrypted("spam".to_string())),
        ]);
        let condition_2 = Operator::And(vec![
            Operator::Eq(TagName::EncryptedTagName(vec![10,11,12]), TargetValue::Encrypted(vec![13,14,15])),
            Operator::Not(Box::new(Operator::Eq(TagName::PlainTagName(vec![16,17,18]), TargetValue::Unencrypted("eggs".to_string()))))
        ]);
        let query = Operator::Or(vec![condition_1, condition_2]);
        let class = vec![100,100,100];
        // Only checks that translation succeeds; the generated SQL text is
        // not asserted here.
        let (_query, _arguments) = wql_to_sql(&class, &query, None).unwrap();
    }
}
| 45.443089 | 136 | 0.619823 |
162448f7d8197685f5950549496f01bcc3512f7a
| 16,619 |
use super::{Lexeme, Parser, SyntaxResult};
use crate::data::prelude::*;
use crate::data::{lex::Keyword, StorageClass};
use std::iter::Iterator;
type StmtResult = SyntaxResult<Stmt>;
/// Recursive-descent parsing of C statements.
impl<I: Iterator<Item = Lexeme>> Parser<I> {
    /// compound_statement: '{' statement* '}'
    ///
    /// On a syntax error inside the block, panics (error-recovers) to a
    /// synchronization point and keeps collecting further errors; the last
    /// error is returned, the rest go to the error handler.
    pub fn compound_statement(&mut self) -> SyntaxResult<Stmt> {
        let start = self
            .expect(Token::LeftBrace)
            .expect("compound_statement should be called with '{' as the next token");
        let mut stmts = vec![];
        let mut pending_errs = vec![];
        while self.peek_token() != Some(&Token::RightBrace) {
            match self.statement() {
                Ok(stmt) => stmts.push(stmt),
                Err(err) => {
                    self.panic();
                    pending_errs.push(err);
                    // prevent infinite loops if there's a syntax error at EOF
                    if self.peek_token().is_none() {
                        break;
                    }
                }
            }
        }
        if self.expect(Token::RightBrace).is_err() {
            assert!(self.peek_token().is_none()); // from the 'break' above
            let actual_err = self
                .last_location
                .with(SyntaxError::from("unclosed '{' delimeter at end of file"));
            pending_errs.push(actual_err);
        }
        // Report all but the last accumulated error; return the last one.
        if let Some(err) = pending_errs.pop() {
            self.error_handler.extend(pending_errs.into_iter());
            return Err(err);
        }
        Ok(Stmt {
            data: StmtType::Compound(stmts),
            location: start.location,
        })
    }
    /// statement
    /// : labeled_statement
    /// | compound_statement
    /// | expression_statement
    /// | selection_statement
    /// | iteration_statement
    /// | jump_statement
    /// ;
    ///
    /// labeled_statement:
    ///     identifier ':' statement
    ///   | CASE constant_expr ':' statement
    ///   | DEFAULT ':' statement
    ///
    /// Result: whether there was an error in the program source
    /// Option: empty semicolons still count as a statement (so case labels can work)
    pub fn statement(&mut self) -> SyntaxResult<Stmt> {
        // Guards against stack overflow on deeply nested statements.
        let _guard = self.recursion_check();
        match self.peek_token() {
            Some(Token::LeftBrace) => {
                // Compound statements open their own lexical scope.
                self.enter_scope();
                let stmts = self.compound_statement();
                let location = match &stmts {
                    Ok(stmt) => stmt.location,
                    Err(err) => err.location,
                };
                self.leave_scope(location);
                stmts
            }
            Some(Token::Keyword(k)) => match k {
                // labeled_statement (excluding labels)
                Keyword::Case => {
                    let kw = self.next_token().unwrap();
                    let expr = self.constant_expr()?;
                    self.expect(Token::Colon)?;
                    // A case label must fold to an integer constant;
                    // non-constant expressions report an error and use 0.
                    let int = match expr.expr {
                        ExprType::Literal(Literal::Int(i)) => i as u64,
                        ExprType::Literal(Literal::UnsignedInt(u)) => u,
                        ExprType::Literal(Literal::Char(c)) => u64::from(c),
                        _ => {
                            self.semantic_err(
                                "case expression is not an integer constant",
                                expr.location,
                            );
                            0
                        }
                    };
                    let inner = Box::new(self.statement()?);
                    Ok(Stmt {
                        data: StmtType::Case(int, inner),
                        location: kw.location,
                    })
                }
                Keyword::Default => {
                    let kw = self.next_token().unwrap();
                    self.expect(Token::Colon)?;
                    let inner = self.statement()?;
                    Ok(Stmt {
                        data: StmtType::Default(Box::new(inner)),
                        location: kw.location,
                    })
                }
                // selection_statement
                Keyword::If => Ok(self.if_statement()?),
                Keyword::Switch => Ok(self.switch_statement()?),
                // iteration_statement
                Keyword::While => Ok(self.while_statement()?),
                Keyword::Do => Ok(self.do_while_statement()?),
                Keyword::For => Ok(self.for_statement()?),
                // jump_statement
                Keyword::Goto => Ok(self.goto_statement()?),
                Keyword::Continue => {
                    let kw = self.next_token().unwrap();
                    self.expect(Token::Semicolon)?;
                    Ok(Stmt {
                        data: StmtType::Continue,
                        location: kw.location,
                    })
                }
                Keyword::Break => {
                    let kw = self.next_token().unwrap();
                    self.expect(Token::Semicolon)?;
                    Ok(Stmt {
                        data: StmtType::Break,
                        location: kw.location,
                    })
                }
                Keyword::Return => Ok(self.return_statement()?),
                // start of an expression statement
                Keyword::Sizeof
                | Keyword::StaticAssert
                | Keyword::Alignas
                | Keyword::Alignof
                | Keyword::Generic => self.expression_statement(),
                // A declaration-specifier keyword starts a declaration.
                decl if decl.is_decl_specifier() => {
                    let decls = self.declaration()?;
                    let location = match decls.front() {
                        Some(decl) => decl.location,
                        None => {
                            return Ok(Stmt {
                                data: Default::default(),
                                location: self.last_location,
                            })
                        }
                    };
                    Ok(Stmt {
                        data: StmtType::Decl(decls),
                        location,
                    })
                }
                other => {
                    let err = SyntaxError::NotAStatement(*other);
                    Err(self.next_location().with(err))
                }
            },
            // An empty statement: bare ';'.
            Some(Token::Semicolon) => {
                let Locatable { location, .. } = self.next_token().expect("peek is broken");
                Ok(Stmt {
                    data: Default::default(),
                    location,
                })
            }
            Some(Token::Id(_)) => {
                let locatable = self.next_token().unwrap();
                let id = match locatable.data {
                    Token::Id(id) => Locatable {
                        data: id,
                        location: locatable.location,
                    },
                    _ => unreachable!("peek should always be the same as next"),
                };
                // `identifier ':'` is a goto label.
                if self.match_next(&Token::Colon).is_some() {
                    return Ok(Stmt {
                        data: StmtType::Label(id.data, Box::new(self.statement()?)),
                        location: id.location,
                    });
                }
                // Disambiguate typedef-name declarations from expressions
                // by consulting the symbol table, then put the token back.
                let is_typedef = match self.scope.get(&id.data) {
                    Some(typedef) => typedef.storage_class == StorageClass::Typedef,
                    _ => false,
                };
                self.unput(Some(Locatable {
                    data: Token::Id(id.data),
                    location: id.location,
                }));
                if is_typedef {
                    let decls = self.declaration()?;
                    let location = match decls.front() {
                        Some(decl) => decl.location,
                        None => {
                            return Ok(Stmt {
                                data: Default::default(),
                                location: self.last_location,
                            })
                        }
                    };
                    Ok(Stmt {
                        data: StmtType::Decl(decls),
                        location,
                    })
                } else {
                    self.expression_statement()
                }
            }
            _ => self.expression_statement(),
        }
    }
    // expr ;
    fn expression_statement(&mut self) -> SyntaxResult<Stmt> {
        let expr = self.expr()?;
        let end = self.expect(Token::Semicolon)?;
        Ok(Stmt {
            data: StmtType::Expr(expr),
            location: end.location,
        })
    }
    // return (expr)? ;
    // Checks the returned value (or its absence) against the enclosing
    // function's return type and inserts an implicit cast when needed.
    fn return_statement(&mut self) -> StmtResult {
        let ret_token = self.expect(Token::Keyword(Keyword::Return)).unwrap();
        let expr = self.expr_opt(Token::Semicolon)?;
        let current = self
            .current_function
            .as_ref()
            .expect("should have current_function set when parsing statements");
        let ret_type = &current.return_type;
        // (has value?, needs value?) — mismatches are semantic errors.
        let stmt = match (expr, *ret_type != Type::Void) {
            (None, false) => StmtType::Return(None),
            (None, true) => {
                let err = format!("function '{}' does not return a value", current.id);
                self.semantic_err(err, ret_token.location);
                // TODO: will this break codegen?
                StmtType::Return(None)
            }
            (Some(expr), false) => {
                let err = format!("void function '{}' should not return a value", current.id);
                self.semantic_err(err, expr.location);
                StmtType::Return(None)
            }
            (Some(expr), true) => {
                let expr = expr.rval();
                if expr.ctype != *ret_type {
                    StmtType::Return(Some(
                        Expr::cast(expr, ret_type).recover(&mut self.error_handler),
                    ))
                } else {
                    StmtType::Return(Some(expr))
                }
            }
        };
        Ok(Stmt {
            data: stmt,
            location: ret_token.location,
        })
    }
    /// if_statement:
    ///     IF '(' expr ')' statement
    ///   | IF '(' expr ')' statement ELSE statement
    fn if_statement(&mut self) -> StmtResult {
        let start = self
            .expect(Token::Keyword(Keyword::If))
            .expect("parser shouldn't call if_statement without an if");
        self.expect(Token::LeftParen)?;
        let condition = self.expr()?.rval();
        self.expect(Token::RightParen)?;
        let body = self.statement()?;
        let otherwise = if self.match_next(&Token::Keyword(Keyword::Else)).is_some() {
            // NOTE: `if (1) ; else ;` is legal!
            Some(Box::new(self.statement()?))
        } else {
            None
        };
        let stmt = StmtType::If(condition, Box::new(body), otherwise);
        Ok(Stmt {
            data: stmt,
            location: start.location,
        })
    }
    /// switch_statement: SWITCH '(' expr ')' statement
    fn switch_statement(&mut self) -> StmtResult {
        let start = self.expect(Token::Keyword(Keyword::Switch))?;
        self.expect(Token::LeftParen)?;
        let expr = self.expr()?.rval();
        self.expect(Token::RightParen)?;
        let body = self.statement()?;
        let stmt = StmtType::Switch(expr, Box::new(body));
        Ok(Stmt {
            data: stmt,
            location: start.location,
        })
    }
    /// while_statement: WHILE '(' expr ')' statement
    fn while_statement(&mut self) -> StmtResult {
        let start = self.expect(Token::Keyword(Keyword::While))?;
        self.expect(Token::LeftParen)?;
        // `truthy` coerces the condition to a boolean context.
        let condition = self.expr()?.truthy().recover(&mut self.error_handler);
        self.expect(Token::RightParen)?;
        let body = self.statement()?;
        Ok(Stmt {
            data: StmtType::While(condition, Box::new(body)),
            location: start.location,
        })
    }
    /// do_while_statement: DO statement WHILE '(' expr ')' ';'
    fn do_while_statement(&mut self) -> StmtResult {
        let start = self
            .expect(Token::Keyword(Keyword::Do))
            .unwrap_or_else(|_| {
                panic!("do_while_statement should only be called with `do` as next token")
            });
        let body = self.statement()?;
        self.expect(Token::Keyword(Keyword::While))?;
        self.expect(Token::LeftParen)?;
        let condition = self.expr()?.truthy().recover(&mut self.error_handler);
        self.expect(Token::RightParen)?;
        self.expect(Token::Semicolon)?;
        let stmt = StmtType::Do(Box::new(body), condition);
        Ok(Stmt {
            data: stmt,
            location: start.location,
        })
    }
    /// for_statement:
    ///     FOR '(' expr_opt ';' expr_opt ';' expr_opt ') statement
    ///   | FOR '(' declaration expr_opt ';' expr_opt ') statement
    fn for_statement(&mut self) -> StmtResult {
        let start = self.expect(Token::Keyword(Keyword::For))?;
        let paren = self.expect(Token::LeftParen)?;
        // C99: the init clause may declare variables scoped to the loop.
        self.enter_scope();
        let decl_stmt = match self.peek_token() {
            Some(Token::Keyword(k)) if k.is_decl_specifier() => StmtType::Decl(self.declaration()?),
            Some(Token::Id(id)) => {
                // An identifier may start either a typedef-name declaration
                // or an expression; check the symbol table to decide.
                let id = *id;
                match self.scope.get(&id) {
                    Some(symbol) if symbol.storage_class == StorageClass::Typedef => {
                        StmtType::Decl(self.declaration()?)
                    }
                    _ => match self.expr_opt(Token::Semicolon)? {
                        Some(expr) => StmtType::Expr(expr),
                        None => Default::default(),
                    },
                }
            }
            Some(_) => match self.expr_opt(Token::Semicolon)? {
                Some(expr) => StmtType::Expr(expr),
                None => Default::default(),
            },
            None => {
                return Err(self
                    .last_location
                    .with(SyntaxError::EndOfFile("expression or ';'")));
            }
        };
        let decl = Box::new(Stmt {
            data: decl_stmt,
            location: paren.location,
        });
        let controlling_expr = self
            .expr_opt(Token::Semicolon)?
            .map(|expr| Expr::truthy(expr).recover(&mut self.error_handler));
        let iter_expr = self.expr_opt(Token::RightParen)?;
        let body = Box::new(self.statement()?);
        self.leave_scope(self.last_location);
        Ok(Stmt {
            data: StmtType::For(
                decl,
                controlling_expr.map(Box::new),
                iter_expr.map(Box::new),
                body,
            ),
            location: start.location,
        })
    }
    /// goto_statement: GOTO identifier ';'
    fn goto_statement(&mut self) -> StmtResult {
        let start = self.expect(Token::Keyword(Keyword::Goto)).unwrap();
        let id = match self.expect(Token::Id(Default::default()))?.data {
            Token::Id(id) => id,
            _ => unreachable!("expect should only return an Id if called with Token::Id"),
        };
        self.expect(Token::Semicolon)?;
        Ok(Stmt {
            data: StmtType::Goto(id),
            location: start.location,
        })
    }
}
#[cfg(test)]
mod tests {
    use super::super::tests::*;
    use crate::data::prelude::*;
    use crate::intern::InternedStr;
    // Parses `stmt` as a single statement; parser-side semantic errors take
    // precedence over the syntactic result.
    fn parse_stmt(stmt: &str) -> CompileResult<Stmt> {
        let mut p = parser(stmt);
        let exp = p.statement();
        if let Some(err) = p.error_handler.pop_front() {
            Err(err)
        } else {
            exp.map_err(CompileError::from)
        }
    }
    #[test]
    // NOTE: this seems to be one of the few tests that checks that the location
    // is correct. If it starts failing, maybe look at the lexer first
    fn test_expr_stmt() {
        let parsed = parse_stmt("1;");
        let expected = Ok(Stmt {
            data: StmtType::Expr(parser("1").expr().unwrap()),
            location: Location {
                filename: InternedStr::get_or_intern("<test suite>"),
                // TODO: this should really be 0..2
                // but I haven't implemented merging spans yet
                span: (1..2).into(),
            },
        });
        assert_eq!(parsed, expected);
        assert_eq!(parsed.unwrap().location, expected.unwrap().location);
    }
}
| 38.920375 | 100 | 0.463987 |
f7695aadd0e09a0e95ba69e89183751625e362b3
| 3,153 |
// Custom `syn` keywords recognized by the statement parsers below
// (assert / assume / havoc / assign).
vir_raw_block! { kw =>
    pub mod kw {
        syn::custom_keyword!(assert);
        syn::custom_keyword!(assume);
        syn::custom_keyword!(havoc);
        syn::custom_keyword!(assign);
    }
}
// Parsing (`assert <label> <assertion>;`) and token emission for Assert
// statements; the emitted code constructs an `Assert` value with the label
// wrapped in `Some(..)`.
vir_raw_block! { Assert =>
    impl syn::parse::Parse for Assert {
        fn parse(input: syn::parse::ParseStream) -> syn::Result<Self> {
            input.parse::<kw::assert>()?;
            let statement = Self {
                label: input.parse()?,
                assertion: input.parse()?,
            };
            input.parse::<syn::Token![;]>()?;
            Ok(statement)
        }
    }
    impl quote::ToTokens for Assert {
        fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) {
            // The label identifier is emitted as a string literal.
            let label = self.label.to_string();
            let assertion = &self.assertion;
            tokens.extend(quote::quote! {
                Assert { label: Some(#label.into()), assertion: #assertion }
            })
        }
    }
}
// Parsing (`assume <label> <assertion>;`) and token emission for Assume
// statements; mirrors the Assert implementation above.
vir_raw_block! { Assume =>
    impl syn::parse::Parse for Assume {
        fn parse(input: syn::parse::ParseStream) -> syn::Result<Self> {
            input.parse::<kw::assume>()?;
            let statement = Self {
                label: input.parse()?,
                assertion: input.parse()?,
            };
            input.parse::<syn::Token![;]>()?;
            Ok(statement)
        }
    }
    impl quote::ToTokens for Assume {
        fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) {
            // The label identifier is emitted as a string literal.
            let label = self.label.to_string();
            let assertion = &self.assertion;
            tokens.extend(quote::quote! {
                Assume { label: Some(#label.into()), assertion: #assertion }
            })
        }
    }
}
// Parsing (`havoc <variable>;`) and token emission for Havoc statements;
// currently only the `Variable` form exists.
vir_raw_block! { Havoc =>
    impl syn::parse::Parse for Havoc {
        fn parse(input: syn::parse::ParseStream) -> syn::Result<Self> {
            input.parse::<kw::havoc>()?;
            let statement = Self::Variable(input.parse()?);
            input.parse::<syn::Token![;]>()?;
            Ok(statement)
        }
    }
    impl quote::ToTokens for Havoc {
        fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) {
            match self {
                Havoc::Variable(variable) => {
                    tokens.extend(quote::quote! {
                        Havoc::Variable(#variable)
                    })
                }
            }
        }
    }
}
// Parsing (`assign <variable> = <expression>;`) and token emission for
// Assign statements.
vir_raw_block! { Assign =>
    impl syn::parse::Parse for Assign {
        fn parse(input: syn::parse::ParseStream) -> syn::Result<Self> {
            input.parse::<kw::assign>()?;
            let variable = input.parse()?;
            input.parse::<syn::Token![=]>()?;
            let expression = input.parse()?;
            input.parse::<syn::Token![;]>()?;
            Ok(Self { variable, expression })
        }
    }
    impl quote::ToTokens for Assign {
        fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) {
            let variable = &self.variable;
            let expression = &self.expression;
            tokens.extend(quote::quote! {
                Assign { variable: #variable, expression: #expression }
            })
        }
    }
}
| 32.173469 | 76 | 0.497304 |
e81e2713cd4875be24842dfa9155b09183445be0
| 32,997 |
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*! See doc.rs for documentation */
#![allow(non_camel_case_types)]
pub use middle::ty::IntVarValue;
pub use middle::typeck::infer::resolve::resolve_and_force_all_but_regions;
pub use middle::typeck::infer::resolve::{force_all, not_regions};
pub use middle::typeck::infer::resolve::{force_ivar};
pub use middle::typeck::infer::resolve::{force_tvar, force_rvar};
pub use middle::typeck::infer::resolve::{resolve_ivar, resolve_all};
pub use middle::typeck::infer::resolve::{resolve_nested_tvar};
pub use middle::typeck::infer::resolve::{resolve_rvar};
use collections::HashMap;
use middle::ty::{TyVid, IntVid, FloatVid, RegionVid, Vid};
use middle::ty;
use middle::ty_fold;
use middle::ty_fold::TypeFolder;
use middle::typeck::check::regionmanip::replace_late_bound_regions_in_fn_sig;
use middle::typeck::infer::coercion::Coerce;
use middle::typeck::infer::combine::{Combine, CombineFields, eq_tys};
use middle::typeck::infer::region_inference::{RegionVarBindings};
use middle::typeck::infer::resolve::{resolver};
use middle::typeck::infer::sub::Sub;
use middle::typeck::infer::lub::Lub;
use middle::typeck::infer::to_str::InferStr;
use middle::typeck::infer::unify::{ValsAndBindings, Root};
use middle::typeck::infer::error_reporting::ErrorReporting;
use std::cell::{Cell, RefCell};
use std::rc::Rc;
use syntax::ast;
use syntax::codemap;
use syntax::codemap::Span;
use syntax::owned_slice::OwnedSlice;
use util::common::indent;
use util::ppaux::{bound_region_to_str, ty_to_str, trait_ref_to_str, Repr};
pub mod doc;
pub mod macros;
pub mod combine;
pub mod glb;
pub mod lattice;
pub mod lub;
pub mod region_inference;
pub mod resolve;
pub mod sub;
pub mod to_str;
pub mod unify;
pub mod coercion;
pub mod error_reporting;
/// An optional bound on a type variable; `None` means that side is
/// unconstrained so far.
pub type Bound<T> = Option<T>;
/// The lower (`lb`) and upper (`ub`) bounds recorded for a general type
/// variable (general type variables are ordered by subtyping, hence the
/// two-sided representation).
#[deriving(Clone)]
pub struct Bounds<T> {
    lb: Bound<T>,
    ub: Bound<T>
}
pub type cres<T> = Result<T,ty::type_err>; // "combine result"
pub type ures = cres<()>; // "unify result"
pub type fres<T> = Result<T, fixup_err>; // "fixup result"
// Result of a coercion attempt; `Some` carries the adjustment to record.
pub type CoerceResult = cres<Option<ty::AutoAdjustment>>;
/// The core inference context: owns the unification tables for type,
/// integral and float variables, plus the region-constraint bindings.
pub struct InferCtxt<'a> {
    pub tcx: &'a ty::ctxt,
    // We instantiate ValsAndBindings with bounds<ty::t> because the
    // types that might instantiate a general type variable have an
    // order, represented by its upper and lower bounds.
    pub ty_var_bindings: RefCell<ValsAndBindings<ty::TyVid, Bounds<ty::t>>>,
    pub ty_var_counter: Cell<uint>,  // next fresh TyVid index
    // Map from integral variable to the kind of integer it represents
    pub int_var_bindings: RefCell<ValsAndBindings<ty::IntVid,
                                                  Option<IntVarValue>>>,
    pub int_var_counter: Cell<uint>,  // next fresh IntVid index
    // Map from floating variable to the kind of float it represents
    pub float_var_bindings: RefCell<ValsAndBindings<ty::FloatVid,
                                                    Option<ast::FloatTy>>>,
    pub float_var_counter: Cell<uint>,  // next fresh FloatVid index
    // For region variables.
    pub region_vars: RegionVarBindings<'a>,
}
/// Why did we require that the two types be related?
///
/// See `error_reporting.rs` for more details
#[deriving(Clone)]
pub enum TypeOrigin {
    // Not yet categorized in a better way
    Misc(Span),
    // Checking that method of impl is compatible with trait
    MethodCompatCheck(Span),
    // Checking that this expression can be assigned where it needs to be
    // FIXME(eddyb) #11161 is the original Expr required?
    ExprAssignable(Span),
    // Relating trait refs when resolving vtables
    RelateTraitRefs(Span),
    // Relating the impl's self type to the trait when resolving vtables
    RelateSelfType(Span),
    // Computing common supertype in the arms of a match expression.
    // First span is the whole `match` (used by `span()`), second the arm.
    MatchExpressionArm(Span, Span),
    // Computing common supertype in an if expression
    IfExpression(Span),
}
/// The pair of values (expected vs. found) that a constraint related;
/// stored in a `TypeTrace` for error reporting.
///
/// See `error_reporting.rs` for more details
#[deriving(Clone)]
pub enum ValuePairs {
    Types(ty::expected_found<ty::t>),
    TraitRefs(ty::expected_found<Rc<ty::TraitRef>>),
}
/// The trace designates the path through inference that we took to
/// encounter an error or subtyping constraint.
///
/// See `error_reporting.rs` for more details.
#[deriving(Clone)]
pub struct TypeTrace {
    origin: TypeOrigin,   // why the relation was required
    values: ValuePairs,   // the expected/found pair being related
}
/// The origin of a `r1 <= r2` constraint.
///
/// See `error_reporting.rs` for more details
#[deriving(Clone)]
pub enum SubregionOrigin {
    // Arose from a subtyping relation
    Subtype(TypeTrace),
    // Stack-allocated closures cannot outlive innermost loop
    // or function so as to ensure we only require finite stack
    InfStackClosure(Span),
    // Invocation of closure must be within its lifetime
    InvokeClosure(Span),
    // Dereference of reference must be within its lifetime
    DerefPointer(Span),
    // Closure bound must not outlive captured free variables;
    // the NodeId identifies the captured variable
    FreeVariable(Span, ast::NodeId),
    // Index into slice must be within its lifetime
    IndexSlice(Span),
    // When casting `&'a T` to an `&'b Trait` object,
    // relating `'a` to `'b`
    RelateObjectBound(Span),
    // Creating a pointer `b` to contents of another reference
    Reborrow(Span),
    // Creating a pointer `b` to contents of an upvar
    ReborrowUpvar(Span, ty::UpvarId),
    // (&'a &'b T) where a >= b; the ty::t is the referent type
    ReferenceOutlivesReferent(ty::t, Span),
    // A `ref b` whose region does not enclose the decl site
    BindingTypeIsNotValidAtDecl(Span),
    // Regions appearing in a method receiver must outlive method call
    CallRcvr(Span),
    // Regions appearing in a function argument must outlive func call
    CallArg(Span),
    // Region in return type of invoked fn must enclose call
    CallReturn(Span),
    // Region resulting from a `&` expr must enclose the `&` expr
    AddrOf(Span),
    // An auto-borrow that does not enclose the expr where it occurs
    AutoBorrow(Span),
}
/// Reasons to create a region inference variable
///
/// See `error_reporting.rs` for more details
#[deriving(Clone)]
pub enum RegionVariableOrigin {
    // Region variables created for ill-categorized reasons,
    // mostly indicates places in need of refactoring
    MiscVariable(Span),
    // Regions created by a `&P` or `[...]` pattern
    PatternRegion(Span),
    // Regions created by `&` operator
    AddrOfRegion(Span),
    // Regions created by `&[...]` literal
    AddrOfSlice(Span),
    // Regions created as part of an autoref of a method receiver
    Autoref(Span),
    // Regions created as part of an automatic coercion
    Coercion(TypeTrace),
    // Region variables created as the values for early-bound regions
    EarlyBoundRegion(Span, ast::Name),
    // Region variables created for bound regions
    // in a function or method that is called
    LateBoundRegion(Span, ty::BoundRegion),
    // Region variables created for bound regions
    // when doing subtyping/lub/glb computations
    BoundRegionInFnType(Span, ty::BoundRegion),
    // Region of an upvar captured by a closure
    UpvarRegion(ty::UpvarId, Span),
    // Bound region instantiated during coherence checking
    // (has no useful span; `span()` returns a dummy)
    BoundRegionInCoherence(ast::Name),
}
/// Failure modes of the resolution ("fixup") pass: a variable that is
/// still unconstrained, or a cycle / region-var dependency that cannot
/// be resolved to a concrete value.
pub enum fixup_err {
    unresolved_int_ty(IntVid),
    unresolved_ty(TyVid),
    cyclic_ty(TyVid),
    unresolved_region(RegionVid),
    region_var_bound_by_region_var(RegionVid, RegionVid)
}
/// Renders a `fixup_err` as a human-readable diagnostic message.
pub fn fixup_err_to_str(f: fixup_err) -> String {
    match f {
        // The only variant whose message embeds its payload.
        region_var_bound_by_region_var(r1, r2) => {
            format!("region var {:?} bound by another region var {:?}; \
                     this is a bug in rustc", r1, r2)
        }
        unresolved_int_ty(_) => "unconstrained integral type".to_string(),
        unresolved_ty(_) => "unconstrained type".to_string(),
        cyclic_ty(_) => "cyclic type of infinite size".to_string(),
        unresolved_region(_) => "unconstrained region".to_string(),
    }
}
/// Creates a fresh inference context with empty unification tables and
/// all variable counters at zero.
pub fn new_infer_ctxt<'a>(tcx: &'a ty::ctxt) -> InferCtxt<'a> {
    InferCtxt {
        tcx: tcx,
        ty_var_bindings: RefCell::new(ValsAndBindings::new()),
        ty_var_counter: Cell::new(0),
        int_var_bindings: RefCell::new(ValsAndBindings::new()),
        int_var_counter: Cell::new(0),
        float_var_bindings: RefCell::new(ValsAndBindings::new()),
        float_var_counter: Cell::new(0),
        region_vars: RegionVarBindings::new(tcx),
    }
}
pub fn common_supertype(cx: &InferCtxt,
                        origin: TypeOrigin,
                        a_is_expected: bool,
                        a: ty::t,
                        b: ty::t)
                        -> ty::t {
    /*!
     * Computes the least upper-bound of `a` and `b`. If this is
     * not possible, reports an error and returns ty::err.
     */
    debug!("common_supertype({}, {})", a.inf_str(cx), b.inf_str(cx));
    let trace = TypeTrace {
        origin: origin,
        values: Types(expected_found(a_is_expected, a, b))
    };
    // Commit the LUB constraints on success; on failure report the
    // mismatch and recover with the error type.
    let result = cx.commit(|| cx.lub(a_is_expected, trace.clone()).tys(a, b));
    match result {
        Ok(t) => t,
        Err(ref err) => {
            cx.report_and_explain_type_error(trace, err);
            ty::mk_err()
        }
    }
}
/// Requires `a <: b`, committing the resulting bindings on success.
pub fn mk_subty(cx: &InferCtxt,
                a_is_expected: bool,
                origin: TypeOrigin,
                a: ty::t,
                b: ty::t)
                -> ures {
    debug!("mk_subty({} <: {})", a.inf_str(cx), b.inf_str(cx));
    indent(|| {
        cx.commit(|| {
            let trace = TypeTrace {
                origin: origin,
                values: Types(expected_found(a_is_expected, a, b))
            };
            cx.sub(a_is_expected, trace).tys(a, b)
        })
    }).to_ures()
}
/// Tests whether `a <: b` could hold, without retaining any bindings
/// (runs inside `probe`, so all side effects are rolled back).
pub fn can_mk_subty(cx: &InferCtxt, a: ty::t, b: ty::t) -> ures {
    debug!("can_mk_subty({} <: {})", a.inf_str(cx), b.inf_str(cx));
    indent(|| {
        cx.probe(|| {
            let trace = TypeTrace {
                origin: Misc(codemap::DUMMY_SP),
                values: Types(expected_found(true, a, b))
            };
            cx.sub(true, trace).tys(a, b)
        })
    }).to_ures()
}
/// Records the region constraint `a <= b`, committing it immediately
/// (snapshot is opened and closed around the single constraint).
pub fn mk_subr(cx: &InferCtxt,
               _a_is_expected: bool,
               origin: SubregionOrigin,
               a: ty::Region,
               b: ty::Region) {
    debug!("mk_subr({} <: {})", a.inf_str(cx), b.inf_str(cx));
    cx.region_vars.start_snapshot();
    cx.region_vars.make_subregion(origin, a, b);
    cx.region_vars.commit();
}
/// Requires `a` and `b` to be equal (mutual subtyping via `eq_tys`),
/// committing the resulting bindings on success.
pub fn mk_eqty(cx: &InferCtxt,
               a_is_expected: bool,
               origin: TypeOrigin,
               a: ty::t,
               b: ty::t)
               -> ures {
    debug!("mk_eqty({} <: {})", a.inf_str(cx), b.inf_str(cx));
    indent(|| {
        cx.commit(|| {
            let trace = TypeTrace {
                origin: origin,
                values: Types(expected_found(a_is_expected, a, b))
            };
            let suber = cx.sub(a_is_expected, trace);
            eq_tys(&suber, a, b)
        })
    }).to_ures()
}
/// Requires trait ref `a` to be a subtype of trait ref `b`,
/// committing the resulting bindings on success.
pub fn mk_sub_trait_refs(cx: &InferCtxt,
                         a_is_expected: bool,
                         origin: TypeOrigin,
                         a: Rc<ty::TraitRef>,
                         b: Rc<ty::TraitRef>)
                         -> ures
{
    debug!("mk_sub_trait_refs({} <: {})",
           a.inf_str(cx), b.inf_str(cx));
    indent(|| {
        cx.commit(|| {
            let trace = TypeTrace {
                origin: origin,
                values: TraitRefs(expected_found(a_is_expected, a.clone(), b.clone()))
            };
            let suber = cx.sub(a_is_expected, trace);
            suber.trait_refs(&*a, &*b)
        })
    }).to_ures()
}
/// Packs `a` and `b` into an `expected_found`, with `a_is_expected`
/// selecting which of the two plays the "expected" role.
fn expected_found<T>(a_is_expected: bool,
                     a: T,
                     b: T) -> ty::expected_found<T> {
    let (expected, found) = if a_is_expected { (a, b) } else { (b, a) };
    ty::expected_found { expected: expected, found: found }
}
/// Attempts to coerce `a` to `b`; on success the committed result
/// carries the `AutoAdjustment` (if any) the caller must record.
pub fn mk_coercety(cx: &InferCtxt,
                   a_is_expected: bool,
                   origin: TypeOrigin,
                   a: ty::t,
                   b: ty::t)
                   -> CoerceResult {
    debug!("mk_coercety({} -> {})", a.inf_str(cx), b.inf_str(cx));
    indent(|| {
        cx.commit(|| {
            let trace = TypeTrace {
                origin: origin,
                values: Types(expected_found(a_is_expected, a, b))
            };
            Coerce(cx.combine_fields(a_is_expected, trace)).tys(a, b)
        })
    })
}
// See comment on the type `resolve_state` (in resolve.rs).
/// Resolves inference variables inside `a` according to the `modes`
/// bitmask (e.g. `resolve_ivar`, `resolve_all`); fails with a
/// `fixup_err` when a required variable is still unconstrained.
pub fn resolve_type(cx: &InferCtxt,
                    a: ty::t,
                    modes: uint)
                    -> fres<ty::t> {
    let mut resolver = resolver(cx, modes);
    resolver.resolve_type_chk(a)
}
/// Resolves a region according to the `modes` bitmask; the region
/// counterpart of `resolve_type`.
pub fn resolve_region(cx: &InferCtxt, r: ty::Region, modes: uint)
                      -> fres<ty::Region> {
    let mut resolver = resolver(cx, modes);
    resolver.resolve_region_chk(r)
}
/// Chains a computation onto a successful unification result
/// (an `and_then` specialized to `ures`).
trait then {
    fn then<T:Clone>(&self, f: || -> Result<T,ty::type_err>)
        -> Result<T,ty::type_err>;
}
impl then for ures {
    fn then<T:Clone>(&self, f: || -> Result<T,ty::type_err>)
        -> Result<T,ty::type_err> {
        // Run `f` only when `self` is `Ok(())`; otherwise propagate the error.
        self.and_then(|_i| f())
    }
}
/// Discards the success payload, converting any `cres<T>` into a `ures`.
trait ToUres {
    fn to_ures(&self) -> ures;
}
impl<T> ToUres for cres<T> {
    fn to_ures(&self) -> ures {
        match *self {
            Ok(ref _v) => Ok(()),
            Err(ref e) => Err((*e))
        }
    }
}
/// Checks that a successful result equals `t`; if it does not, replaces
/// the result with the error produced lazily by `f`.
trait CresCompare<T> {
    fn compare(&self, t: T, f: || -> ty::type_err) -> cres<T>;
}
impl<T:Clone + PartialEq> CresCompare<T> for cres<T> {
    fn compare(&self, t: T, f: || -> ty::type_err) -> cres<T> {
        (*self).clone().and_then(|s| {
            if s == t {
                (*self).clone()
            } else {
                Err(f())
            }
        })
    }
}
/// The trivially-successful unification result, `Ok(())`.
pub fn uok() -> ures {
    Ok(())
}
/// Pops entries off the undo log until it shrinks back to `len`,
/// restoring each popped variable's previous value into `vals`.
fn rollback_to<V:Clone + Vid,T:Clone>(vb: &mut ValsAndBindings<V, T>,
                                      len: uint) {
    while vb.bindings.len() != len {
        let (vid, old_v) = vb.bindings.pop().unwrap();
        vb.vals.insert(vid.to_uint(), old_v);
    }
}
/// Undo-log lengths captured by `InferCtxt::start_snapshot`, consumed by
/// `rollback_to` to restore the inference state.
pub struct Snapshot {
    ty_var_bindings_len: uint,
    int_var_bindings_len: uint,
    float_var_bindings_len: uint,
    region_vars_snapshot: uint,
}
impl<'a> InferCtxt<'a> {
    /// Packages this context with an expected/found orientation and trace,
    /// ready to build a combiner (Sub/Lub/Glb/Coerce).
    pub fn combine_fields<'a>(&'a self, a_is_expected: bool, trace: TypeTrace)
                              -> CombineFields<'a> {
        CombineFields {infcx: self,
                       a_is_expected: a_is_expected,
                       trace: trace}
    }
    /// Subtyping combiner.
    pub fn sub<'a>(&'a self, a_is_expected: bool, trace: TypeTrace) -> Sub<'a> {
        Sub(self.combine_fields(a_is_expected, trace))
    }
    /// Least-upper-bound combiner.
    pub fn lub<'a>(&'a self, a_is_expected: bool, trace: TypeTrace) -> Lub<'a> {
        Lub(self.combine_fields(a_is_expected, trace))
    }
    /// True when a snapshot is currently open (delegated to region vars).
    pub fn in_snapshot(&self) -> bool {
        self.region_vars.in_snapshot()
    }
    /// Records the current undo-log lengths so a later `rollback_to`
    /// can undo everything done since.
    pub fn start_snapshot(&self) -> Snapshot {
        Snapshot {
            ty_var_bindings_len: self.ty_var_bindings.borrow().bindings.len(),
            int_var_bindings_len: self.int_var_bindings.borrow().bindings.len(),
            float_var_bindings_len: self.float_var_bindings.borrow().bindings.len(),
            region_vars_snapshot: self.region_vars.start_snapshot(),
        }
    }
    /// Undoes all bindings recorded since `snapshot` was taken.
    pub fn rollback_to(&self, snapshot: &Snapshot) {
        debug!("rollback!");
        rollback_to(&mut *self.ty_var_bindings.borrow_mut(),
                    snapshot.ty_var_bindings_len);
        rollback_to(&mut *self.int_var_bindings.borrow_mut(),
                    snapshot.int_var_bindings_len);
        rollback_to(&mut *self.float_var_bindings.borrow_mut(),
                    snapshot.float_var_bindings_len);
        self.region_vars.rollback_to(snapshot.region_vars_snapshot);
    }
    /// Execute `f` and commit the bindings if successful
    pub fn commit<T,E>(&self, f: || -> Result<T,E>) -> Result<T,E> {
        assert!(!self.in_snapshot());
        debug!("commit()");
        indent(|| {
            let r = self.try(|| f());
            // Clear the undo logs: these bindings are now permanent.
            // NOTE(review): the float undo log is *not* truncated here,
            // unlike ty/int — looks like an oversight; confirm.
            self.ty_var_bindings.borrow_mut().bindings.truncate(0);
            self.int_var_bindings.borrow_mut().bindings.truncate(0);
            self.region_vars.commit();
            r
        })
    }
    /// Execute `f`, unroll bindings on failure
    pub fn try<T,E>(&self, f: || -> Result<T,E>) -> Result<T,E> {
        debug!("try()");
        let snapshot = self.start_snapshot();
        let r = f();
        match r {
            Ok(_) => { debug!("success"); }
            Err(ref e) => {
                debug!("error: {:?}", *e);
                self.rollback_to(&snapshot)
            }
        }
        r
    }
    /// Execute `f` then unroll any bindings it creates
    pub fn probe<T,E>(&self, f: || -> Result<T,E>) -> Result<T,E> {
        debug!("probe()");
        indent(|| {
            let snapshot = self.start_snapshot();
            let r = f();
            // Roll back unconditionally: probes must leave no trace.
            self.rollback_to(&snapshot);
            r
        })
    }
}
/// Allocates the next id for a "simple" (int/float) inference variable:
/// bumps `counter` and seeds the variable's slot with an unconstrained
/// root (`Root(None, 0)`).
fn next_simple_var<V:Clone,T:Clone>(counter: &mut uint,
                                    bindings: &mut ValsAndBindings<V,
                                                                   Option<T>>)
                                    -> uint {
    let id = *counter;
    *counter += 1;
    bindings.vals.insert(id, Root(None, 0));
    id
}
impl<'a> InferCtxt<'a> {
    /// Allocates a fresh general type variable, unconstrained on both
    /// sides (empty `Bounds`).
    pub fn next_ty_var_id(&self) -> TyVid {
        let id = self.ty_var_counter.get();
        self.ty_var_counter.set(id + 1);
        {
            let mut ty_var_bindings = self.ty_var_bindings.borrow_mut();
            let vals = &mut ty_var_bindings.vals;
            vals.insert(id, Root(Bounds { lb: None, ub: None }, 0u));
        }
        return TyVid(id);
    }
    /// Fresh type variable, wrapped as a `ty::t`.
    pub fn next_ty_var(&self) -> ty::t {
        ty::mk_var(self.tcx, self.next_ty_var_id())
    }
    /// `n` fresh type variables.
    pub fn next_ty_vars(&self, n: uint) -> Vec<ty::t> {
        Vec::from_fn(n, |_i| self.next_ty_var())
    }
    /// Fresh integral-kind variable (seeded unconstrained).
    pub fn next_int_var_id(&self) -> IntVid {
        let mut int_var_counter = self.int_var_counter.get();
        let mut int_var_bindings = self.int_var_bindings.borrow_mut();
        let result = IntVid(next_simple_var(&mut int_var_counter,
                                            &mut *int_var_bindings));
        self.int_var_counter.set(int_var_counter);
        result
    }
    /// Fresh float-kind variable (seeded unconstrained).
    pub fn next_float_var_id(&self) -> FloatVid {
        let mut float_var_counter = self.float_var_counter.get();
        let mut float_var_bindings = self.float_var_bindings.borrow_mut();
        let result = FloatVid(next_simple_var(&mut float_var_counter,
                                              &mut *float_var_bindings));
        self.float_var_counter.set(float_var_counter);
        result
    }
    /// Fresh region inference variable, tagged with its origin.
    pub fn next_region_var(&self, origin: RegionVariableOrigin) -> ty::Region {
        ty::ReInfer(ty::ReVar(self.region_vars.new_region_var(origin)))
    }
    /// One fresh region variable per early-bound region parameter.
    pub fn region_vars_for_defs(&self,
                                span: Span,
                                defs: &[ty::RegionParameterDef])
                                -> OwnedSlice<ty::Region> {
        defs.iter()
            .map(|d| self.next_region_var(EarlyBoundRegion(span, d.name)))
            .collect()
    }
    /// Fresh bound region attached to the given binder.
    pub fn fresh_bound_region(&self, binder_id: ast::NodeId) -> ty::Region {
        self.region_vars.new_bound(binder_id)
    }
    /// Runs region resolution and reports any constraint violations.
    pub fn resolve_regions_and_report_errors(&self) {
        let errors = self.region_vars.resolve_regions();
        self.report_region_errors(&errors); // see error_reporting.rs
    }
    /// Pretty-prints `t` with inference variables resolved where possible.
    pub fn ty_to_str(&self, t: ty::t) -> String {
        ty_to_str(self.tcx,
                  self.resolve_type_vars_if_possible(t))
    }
    /// Pretty-prints a list of types as `(a, b, ...)`.
    pub fn tys_to_str(&self, ts: &[ty::t]) -> String {
        let tstrs: Vec<String> = ts.iter().map(|t| self.ty_to_str(*t)).collect();
        format!("({})", tstrs.connect(", "))
    }
    /// Pretty-prints a trait ref with inference variables resolved.
    pub fn trait_ref_to_str(&self, t: &ty::TraitRef) -> String {
        let t = self.resolve_type_vars_in_trait_ref_if_possible(t);
        trait_ref_to_str(self.tcx, &t)
    }
    /// Best-effort resolution: returns the input unchanged if resolution
    /// fails (never errors).
    pub fn resolve_type_vars_if_possible(&self, typ: ty::t) -> ty::t {
        match resolve_type(self, typ, resolve_nested_tvar | resolve_ivar) {
            Ok(new_type) => new_type,
            Err(_) => typ
        }
    }
    /// Best-effort resolution of a trait ref, done by wrapping it in a
    /// dummy trait object type and resolving that.
    pub fn resolve_type_vars_in_trait_ref_if_possible(&self,
                                                      trait_ref:
                                                      &ty::TraitRef)
                                                      -> ty::TraitRef {
        // make up a dummy type just to reuse/abuse the resolve machinery
        let dummy0 = ty::mk_trait(self.tcx,
                                  trait_ref.def_id,
                                  trait_ref.substs.clone(),
                                  ty::UniqTraitStore,
                                  ty::empty_builtin_bounds());
        let dummy1 = self.resolve_type_vars_if_possible(dummy0);
        match ty::get(dummy1).sty {
            ty::ty_trait(box ty::TyTrait { ref def_id, ref substs, .. }) => {
                ty::TraitRef {
                    def_id: *def_id,
                    substs: (*substs).clone(),
                }
            }
            _ => {
                // Resolution must yield another trait object; anything
                // else indicates an internal invariant violation.
                self.tcx.sess.bug(
                    format!("resolve_type_vars_if_possible() yielded {} \
                             when supplied with {}",
                            self.ty_to_str(dummy0),
                            self.ty_to_str(dummy1)).as_slice());
            }
        }
    }
    // [Note-Type-error-reporting]
    // An invariant is that anytime the expected or actual type is ty_err (the special
    // error type, meaning that an error occurred when typechecking this expression),
    // this is a derived error. The error cascaded from another error (that was already
    // reported), so it's not useful to display it to the user.
    // The following four methods -- type_error_message_str, type_error_message_str_with_expected,
    // type_error_message, and report_mismatched_types -- implement this logic.
    // They check if either the actual or expected type is ty_err, and don't print the error
    // in this case. The typechecker should only ever report type errors involving mismatched
    // types using one of these four methods, and should not call span_err directly for such
    // errors.
    /// Reports a type error with no expected type (see note above).
    pub fn type_error_message_str(&self,
                                  sp: Span,
                                  mk_msg: |Option<String>, String| -> String,
                                  actual_ty: String,
                                  err: Option<&ty::type_err>) {
        self.type_error_message_str_with_expected(sp, mk_msg, None, actual_ty, err)
    }
    /// Reports a type error, suppressing it entirely when the (resolved)
    /// expected type is `ty_err` (derived error; see note above).
    pub fn type_error_message_str_with_expected(&self,
                                                sp: Span,
                                                mk_msg: |Option<String>,
                                                         String|
                                                         -> String,
                                                expected_ty: Option<ty::t>,
                                                actual_ty: String,
                                                err: Option<&ty::type_err>) {
        debug!("hi! expected_ty = {:?}, actual_ty = {}", expected_ty, actual_ty);
        let error_str = err.map_or("".to_string(), |t_err| {
            format!(" ({})", ty::type_err_to_str(self.tcx, t_err))
        });
        let resolved_expected = expected_ty.map(|e_ty| {
            self.resolve_type_vars_if_possible(e_ty)
        });
        if !resolved_expected.map_or(false, |e| { ty::type_is_error(e) }) {
            match resolved_expected {
                None => {
                    self.tcx
                        .sess
                        .span_err(sp,
                                  format!("{}{}",
                                          mk_msg(None, actual_ty),
                                          error_str).as_slice())
                }
                Some(e) => {
                    self.tcx.sess.span_err(sp,
                        format!("{}{}",
                                mk_msg(Some(self.ty_to_str(e)), actual_ty),
                                error_str).as_slice());
                }
            }
            for err in err.iter() {
                ty::note_and_explain_type_err(self.tcx, *err)
            }
        }
    }
    /// Reports a type error, suppressing it when the (resolved) actual
    /// type is `ty_err` (derived error; see note above).
    pub fn type_error_message(&self,
                              sp: Span,
                              mk_msg: |String| -> String,
                              actual_ty: ty::t,
                              err: Option<&ty::type_err>) {
        let actual_ty = self.resolve_type_vars_if_possible(actual_ty);
        // Don't report an error if actual type is ty_err.
        if ty::type_is_error(actual_ty) {
            return;
        }
        self.type_error_message_str(sp, |_e, a| { mk_msg(a) }, self.ty_to_str(actual_ty), err);
    }
    /// Reports "expected X but found Y", suppressing it when the resolved
    /// expected type is `ty_err` (derived error; see note above).
    pub fn report_mismatched_types(&self,
                                   sp: Span,
                                   e: ty::t,
                                   a: ty::t,
                                   err: &ty::type_err) {
        let resolved_expected =
            self.resolve_type_vars_if_possible(e);
        let mk_msg = match ty::get(resolved_expected).sty {
            // Don't report an error if expected is ty_err
            ty::ty_err => return,
            _ => {
                // if I leave out : String, it infers &str and complains
                |actual: String| {
                    format!("mismatched types: expected `{}` but found `{}`",
                            self.ty_to_str(resolved_expected),
                            actual)
                }
            }
        };
        self.type_error_message(sp, mk_msg, a, Some(err));
    }
    /// Replaces every late-bound region in `fsig` with a fresh region
    /// variable; returns the instantiated signature plus the mapping
    /// from bound region to the variable that replaced it.
    pub fn replace_late_bound_regions_with_fresh_regions(&self,
                                                         trace: TypeTrace,
                                                         fsig: &ty::FnSig)
                                                         -> (ty::FnSig,
                                                             HashMap<ty::BoundRegion,
                                                                     ty::Region>) {
        let (map, fn_sig) =
            replace_late_bound_regions_in_fn_sig(self.tcx, fsig, |br| {
                let rvar = self.next_region_var(
                    BoundRegionInFnType(trace.origin.span(), br));
                debug!("Bound region {} maps to {:?}",
                       bound_region_to_str(self.tcx, "", false, br),
                       rvar);
                rvar
            });
        (fn_sig, map)
    }
}
/// Applies `fldr` to every region appearing in `fn_sig`, returning the
/// rewritten signature.
pub fn fold_regions_in_sig(tcx: &ty::ctxt,
                           fn_sig: &ty::FnSig,
                           fldr: |r: ty::Region| -> ty::Region)
                           -> ty::FnSig {
    ty_fold::RegionFolder::regions(tcx, fldr).fold_sig(fn_sig)
}
impl TypeTrace {
    /// The span of the originating constraint (delegates to the origin).
    pub fn span(&self) -> Span {
        self.origin.span()
    }
}
impl Repr for TypeTrace {
    /// Debug rendering; only the origin is shown, not the value pair.
    fn repr(&self, tcx: &ty::ctxt) -> String {
        format!("TypeTrace({})", self.origin.repr(tcx))
    }
}
impl TypeOrigin {
    /// The primary span to report for this origin (for
    /// `MatchExpressionArm` that is the span of the whole `match`).
    pub fn span(&self) -> Span {
        match *self {
            MethodCompatCheck(span) => span,
            ExprAssignable(span) => span,
            Misc(span) => span,
            RelateTraitRefs(span) => span,
            RelateSelfType(span) => span,
            MatchExpressionArm(match_span, _) => match_span,
            IfExpression(span) => span,
        }
    }
}
impl Repr for TypeOrigin {
    /// Debug rendering: the variant name followed by its pretty-printed
    /// span(s).
    fn repr(&self, tcx: &ty::ctxt) -> String {
        match *self {
            Misc(a) => format!("Misc({})", a.repr(tcx)),
            MethodCompatCheck(a) => format!("MethodCompatCheck({})", a.repr(tcx)),
            ExprAssignable(a) => format!("ExprAssignable({})", a.repr(tcx)),
            RelateTraitRefs(a) => format!("RelateTraitRefs({})", a.repr(tcx)),
            RelateSelfType(a) => format!("RelateSelfType({})", a.repr(tcx)),
            MatchExpressionArm(a, b) =>
                format!("MatchExpressionArm({}, {})", a.repr(tcx), b.repr(tcx)),
            IfExpression(a) => format!("IfExpression({})", a.repr(tcx)),
        }
    }
}
impl SubregionOrigin {
    /// The span to report for this region constraint's origin.
    pub fn span(&self) -> Span {
        match *self {
            Subtype(ref a) => a.span(),
            InfStackClosure(a) => a,
            InvokeClosure(a) => a,
            DerefPointer(a) => a,
            FreeVariable(a, _) => a,
            IndexSlice(a) => a,
            RelateObjectBound(a) => a,
            Reborrow(a) => a,
            ReborrowUpvar(a, _) => a,
            ReferenceOutlivesReferent(_, a) => a,
            BindingTypeIsNotValidAtDecl(a) => a,
            CallRcvr(a) => a,
            CallArg(a) => a,
            CallReturn(a) => a,
            AddrOf(a) => a,
            AutoBorrow(a) => a,
        }
    }
}
impl Repr for SubregionOrigin {
    /// Debug rendering: the variant name plus its pretty-printed payload.
    fn repr(&self, tcx: &ty::ctxt) -> String {
        match *self {
            Subtype(ref a) => format!("Subtype({})", a.repr(tcx)),
            InfStackClosure(a) => format!("InfStackClosure({})", a.repr(tcx)),
            InvokeClosure(a) => format!("InvokeClosure({})", a.repr(tcx)),
            DerefPointer(a) => format!("DerefPointer({})", a.repr(tcx)),
            FreeVariable(a, b) => format!("FreeVariable({}, {})", a.repr(tcx), b),
            IndexSlice(a) => format!("IndexSlice({})", a.repr(tcx)),
            RelateObjectBound(a) => format!("RelateObjectBound({})", a.repr(tcx)),
            Reborrow(a) => format!("Reborrow({})", a.repr(tcx)),
            ReborrowUpvar(a, b) => format!("ReborrowUpvar({},{:?})", a.repr(tcx), b),
            ReferenceOutlivesReferent(_, a) =>
                format!("ReferenceOutlivesReferent({})", a.repr(tcx)),
            BindingTypeIsNotValidAtDecl(a) =>
                format!("BindingTypeIsNotValidAtDecl({})", a.repr(tcx)),
            CallRcvr(a) => format!("CallRcvr({})", a.repr(tcx)),
            CallArg(a) => format!("CallArg({})", a.repr(tcx)),
            CallReturn(a) => format!("CallReturn({})", a.repr(tcx)),
            AddrOf(a) => format!("AddrOf({})", a.repr(tcx)),
            AutoBorrow(a) => format!("AutoBorrow({})", a.repr(tcx)),
        }
    }
}
impl RegionVariableOrigin {
    /// The span to report for this region variable's origin
    /// (`BoundRegionInCoherence` has none, so a dummy span is returned).
    pub fn span(&self) -> Span {
        match *self {
            MiscVariable(a) => a,
            PatternRegion(a) => a,
            AddrOfRegion(a) => a,
            AddrOfSlice(a) => a,
            Autoref(a) => a,
            Coercion(ref a) => a.span(),
            EarlyBoundRegion(a, _) => a,
            LateBoundRegion(a, _) => a,
            BoundRegionInFnType(a, _) => a,
            BoundRegionInCoherence(_) => codemap::DUMMY_SP,
            UpvarRegion(_, a) => a
        }
    }
}
impl Repr for RegionVariableOrigin {
    /// Debug rendering: the variant name plus its pretty-printed payload.
    ///
    /// Fixed for consistency: `BoundRegionInFnType` and
    /// `BoundRegionInCoherence` previously printed as
    /// `bound_regionInFnType` / `bound_regionInCoherence`, disagreeing
    /// with their variant names and with every other arm in this impl.
    fn repr(&self, tcx: &ty::ctxt) -> String {
        match *self {
            MiscVariable(a) => format!("MiscVariable({})", a.repr(tcx)),
            PatternRegion(a) => format!("PatternRegion({})", a.repr(tcx)),
            AddrOfRegion(a) => format!("AddrOfRegion({})", a.repr(tcx)),
            AddrOfSlice(a) => format!("AddrOfSlice({})", a.repr(tcx)),
            Autoref(a) => format!("Autoref({})", a.repr(tcx)),
            Coercion(ref a) => format!("Coercion({})", a.repr(tcx)),
            EarlyBoundRegion(a, b) => {
                format!("EarlyBoundRegion({},{})", a.repr(tcx), b.repr(tcx))
            }
            LateBoundRegion(a, b) => {
                format!("LateBoundRegion({},{})", a.repr(tcx), b.repr(tcx))
            }
            BoundRegionInFnType(a, b) => {
                format!("BoundRegionInFnType({},{})", a.repr(tcx), b.repr(tcx))
            }
            BoundRegionInCoherence(a) => {
                format!("BoundRegionInCoherence({})", a.repr(tcx))
            }
            UpvarRegion(a, b) => {
                format!("UpvarRegion({}, {})", a.repr(tcx), b.repr(tcx))
            }
        }
    }
}
| 33.533537 | 98 | 0.532715 |
2224dc572b9666b4979c09467727d24391e33269
| 4,150 |
use std::sync::Arc;
use std::thread;
use std::time::Duration;
use std::{collections::HashSet, convert::TryFrom};
use graph::anyhow::bail;
use graph::components::store::{BlockStore as _, ChainStore as _};
use graph::prelude::{anyhow, BlockNumber, BlockPtr, NodeId, SubgraphStore};
use graph_store_postgres::BlockStore;
use graph_store_postgres::{connection_pool::ConnectionPool, Store};
use crate::manager::deployment::{Deployment, DeploymentSearch};
/// Resolves and validates the target block pointer for a rewind.
///
/// Checks that all `deployments` live on the same chain, then verifies
/// `hash`/`number` against that chain's store: if the hash is known its
/// recorded number must match `number`; if it is unknown the call fails
/// unless `force` is set. `searches` is only used to name the culprits
/// in the multi-chain error message. The caller guarantees that
/// `deployments` is non-empty.
fn block_ptr(
    store: Arc<BlockStore>,
    searches: &[DeploymentSearch],
    deployments: &[Deployment],
    hash: &str,
    number: BlockNumber,
    force: bool,
) -> Result<BlockPtr, anyhow::Error> {
    let block_ptr_to = BlockPtr::try_from((hash, number as i64))
        .map_err(|e| anyhow!("error converting to block pointer: {}", e))?;

    // A single rewind target only makes sense on a single chain.
    let chains = deployments.iter().map(|d| &d.chain).collect::<HashSet<_>>();
    if chains.len() > 1 {
        let names = searches
            .iter()
            .map(|s| s.to_string())
            .collect::<Vec<_>>()
            .join(", ");
        bail!("the deployments matching `{names}` are on different chains");
    }
    let chain = chains.iter().next().unwrap();
    let chain_store = match store.chain_store(chain) {
        None => bail!("can not find chain store for {}", chain),
        Some(store) => store,
    };
    if let Some((_, number)) = chain_store.block_number(&block_ptr_to.hash)? {
        // Known hash: its recorded number must agree with the request.
        if number != block_ptr_to.number {
            bail!(
                "the given hash is for block number {} but the command specified block number {}",
                number,
                block_ptr_to.number
            );
        }
    } else if !force {
        // Unknown hash: refuse unless the caller explicitly forces it.
        bail!(
            "the chain {} does not have a block with hash {} \
             (run with --force to avoid this error)",
            chain,
            block_ptr_to.hash
        );
    }
    Ok(block_ptr_to)
}
/// Rewinds the deployments matched by `searches` to the given block.
///
/// Steps: resolve the searches to deployments, validate the target block
/// pointer (`block_ptr`), pause each deployment by reassigning it to a
/// `paused_`-prefixed node id, wait `sleep` for indexing to wind down,
/// rewind each subgraph, then reassign each deployment back to its
/// original node.
pub fn run(
    primary: ConnectionPool,
    store: Arc<Store>,
    searches: Vec<DeploymentSearch>,
    block_hash: String,
    block_number: BlockNumber,
    force: bool,
    sleep: Duration,
) -> Result<(), anyhow::Error> {
    // Prefix that marks a deployment's node assignment as paused.
    const PAUSED: &str = "paused_";
    let subgraph_store = store.subgraph_store();
    let block_store = store.block_store();
    // Resolve every search and flatten the matches into one list.
    let deployments = searches
        .iter()
        .map(|search| search.lookup(&primary))
        .collect::<Result<Vec<_>, _>>()?
        .into_iter()
        .flatten()
        .collect::<Vec<_>>();
    if deployments.is_empty() {
        println!("nothing to do");
        return Ok(());
    }
    let block_ptr_to = block_ptr(
        block_store,
        &searches,
        &deployments,
        &block_hash,
        block_number,
        force,
    )?;
    println!("Pausing deployments");
    let mut paused = false;
    for deployment in &deployments {
        if let Some(node) = &deployment.node_id {
            // Skip deployments that are already paused.
            if !node.starts_with(PAUSED) {
                let loc = deployment.locator();
                let node =
                    NodeId::new(format!("{}{}", PAUSED, node)).expect("paused_ node id is valid");
                subgraph_store.reassign_subgraph(&loc, &node)?;
                println!(" ... paused {}", loc);
                paused = true;
            }
        }
    }
    if paused {
        // There's no good way to tell that a subgraph has in fact stopped
        // indexing. We sleep and hope for the best.
        // (Message now reports the actual wait, which is the `sleep`
        // parameter — previously it always claimed "10s".)
        println!(
            "\nWaiting {}s to make sure pausing was processed",
            sleep.as_secs()
        );
        thread::sleep(sleep);
    }
    println!("\nRewinding deployments");
    for deployment in &deployments {
        let loc = deployment.locator();
        subgraph_store.rewind(loc.hash.clone(), block_ptr_to.clone())?;
        println!(" ... rewound {}", loc);
    }
    println!("Resuming deployments");
    for deployment in &deployments {
        if let Some(node) = &deployment.node_id {
            // Note: `node_id` still carries the original (pre-pause) value
            // read at lookup time, so this restores the old assignment.
            let loc = deployment.locator();
            let node = NodeId::new(node.clone()).expect("node id is valid");
            subgraph_store.reassign_subgraph(&loc, &node)?;
        }
    }
    Ok(())
}
| 30.970149 | 98 | 0.565542 |
76c9dfce93d227431d19888e8f08bae0fa296e02
| 3,321 |
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test where we change the *signature* of a public, inherent method.
// revisions:cfail1 cfail2
// compile-flags: -Z query-dep-graph
// compile-pass
#![crate_type = "rlib"]
#![feature(rustc_attrs)]
#![feature(stmt_expr_attributes)]
#![allow(dead_code)]
// These are expected to require codegen.
#![rustc_partition_codegened(module="struct_point-point", cfg="cfail2")]
#![rustc_partition_codegened(module="struct_point-fn_calls_changed_method", cfg="cfail2")]
#![rustc_partition_reused(module="struct_point-fn_calls_another_method", cfg="cfail2")]
#![rustc_partition_reused(module="struct_point-fn_make_struct", cfg="cfail2")]
#![rustc_partition_reused(module="struct_point-fn_read_field", cfg="cfail2")]
#![rustc_partition_reused(module="struct_point-fn_write_field", cfg="cfail2")]
pub mod point {
    // Simple 2D point; the test only cares about this type's method set.
    pub struct Point {
        pub x: f32,
        pub y: f32,
    }
    impl Point {
        // cfail1 revision: takes the other point by value.
        // Note: returns the *squared* distance (no sqrt is taken).
        #[cfg(cfail1)]
        pub fn distance_from_point(&self, p: Option<Point>) -> f32 {
            let p = p.unwrap_or(Point { x: 0.0, y: 0.0 });
            let x_diff = self.x - p.x;
            let y_diff = self.y - p.y;
            return x_diff * x_diff + y_diff * y_diff;
        }
        // cfail2 revision: signature changes to take the point by
        // reference — this is the change whose incremental effects the
        // partitioning attributes at the top of the file assert on.
        #[cfg(cfail2)]
        pub fn distance_from_point(&self, p: Option<&Point>) -> f32 {
            const ORIGIN: &Point = &Point { x: 0.0, y: 0.0 };
            let p = p.unwrap_or(ORIGIN);
            let x_diff = self.x - p.x;
            let y_diff = self.y - p.y;
            return x_diff * x_diff + y_diff * y_diff;
        }
        // Unchanged in both revisions; its callers should stay clean.
        pub fn x(&self) -> f32 {
            self.x
        }
    }
}
/// A fn item that calls the method that was changed
pub mod fn_calls_changed_method {
    use point::Point;
    // Expect dirty: the callee's signature changed between revisions.
    #[rustc_dirty(label="TypeckTables", cfg="cfail2")]
    pub fn check() {
        let p = Point { x: 2.0, y: 2.0 };
        p.distance_from_point(None);
    }
}
/// A fn item that calls a method that was not changed
pub mod fn_calls_another_method {
    use point::Point;
    // Expect clean: only `Point::x` is called, which did not change.
    #[rustc_clean(label="TypeckTables", cfg="cfail2")]
    pub fn check() {
        let p = Point { x: 2.0, y: 2.0 };
        p.x();
    }
}
/// A fn item that makes an instance of `Point` but does not invoke methods
pub mod fn_make_struct {
    use point::Point;
    // Expect clean: construction touches only the (unchanged) struct layout.
    // NOTE(review): despite the name, this returns (2.0, 2.0), not the origin.
    #[rustc_clean(label="TypeckTables", cfg="cfail2")]
    pub fn make_origin() -> Point {
        Point { x: 2.0, y: 2.0 }
    }
}
/// A fn item that reads fields from `Point` but does not invoke methods
pub mod fn_read_field {
    use point::Point;
    // Expect clean: field access is unaffected by the method change.
    #[rustc_clean(label="TypeckTables", cfg="cfail2")]
    pub fn get_x(p: Point) -> f32 {
        p.x
    }
}
/// A fn item that writes to a field of `Point` but does not invoke methods
pub mod fn_write_field {
    use point::Point;
    // Expect clean: field mutation is unaffected by the method change.
    #[rustc_clean(label="TypeckTables", cfg="cfail2")]
    pub fn inc_x(p: &mut Point) {
        p.x += 1.0;
    }
}
| 29.651786 | 90 | 0.627522 |
790c6086cbcd5e959cdc03a2676f7842bedd11c1
| 716 |
// Copyright (c) 2020 Xu Shaohua <[email protected]>. All rights reserved.
// Use of this source is governed by Apache-2.0 License that can be found
// in the LICENSE file.
/// From `uapi/asm-generic/mman.h`
///
/// Stack-like segment: the mapping may grow downward.
pub const MAP_GROWSDOWN: i32 = 0x0100;
/// Writes to the underlying file fail with `ETXTBSY`
/// (per mmap(2), this flag is ignored by modern Linux kernels).
pub const MAP_DENYWRITE: i32 = 0x0800;
/// mark it as an executable
/// (per mmap(2), this flag is ignored by modern Linux kernels).
pub const MAP_EXECUTABLE: i32 = 0x1000;
/// pages are locked in memory (will not be paged out)
pub const MAP_LOCKED: i32 = 0x2000;
/// don't check for (swap-space) reservations
pub const MAP_NORESERVE: i32 = 0x4000;
/// mlockall(2) flag: lock all current mappings
pub const MCL_CURRENT: i32 = 1;
/// mlockall(2) flag: lock all future mappings
pub const MCL_FUTURE: i32 = 2;
/// mlockall(2) flag: lock all pages that are faulted in
pub const MCL_ONFAULT: i32 = 4;
| 29.833333 | 75 | 0.72486 |
f7c587da686776c005ae152f69e1a65821450f4c
| 88,535 |
// This is a part of Chrono.
// See README.md and LICENSE.txt for details.
//! ISO 8601 calendar date without timezone.
#[cfg(any(feature = "alloc", feature = "std", test))]
use core::borrow::Borrow;
use core::{str, fmt};
use core::ops::{Add, Sub, AddAssign, SubAssign};
use num_traits::ToPrimitive;
use oldtime::Duration as OldDuration;
use {Weekday, Datelike};
use div::div_mod_floor;
use naive::{NaiveTime, NaiveDateTime, IsoWeek};
use format::{Item, Numeric, Pad};
use format::{parse, Parsed, ParseError, ParseResult, StrftimeItems};
#[cfg(any(feature = "alloc", feature = "std", test))]
use format::DelayedFormat;
use super::isoweek;
use super::internals::{self, DateImpl, Of, Mdf, YearFlags};
// Supported year range, re-exported from the internal representation.
const MAX_YEAR: i32 = internals::MAX_YEAR;
const MIN_YEAR: i32 = internals::MIN_YEAR;
// Number of days from 0000-01-01 to MAX_YEAR-12-31, derived as:
// MAX_YEAR-12-31 minus 0000-01-01
// = ((MAX_YEAR+1)-01-01 minus 0001-01-01) + (0001-01-01 minus 0000-01-01) - 1 day
// = ((MAX_YEAR+1)-01-01 minus 0001-01-01) + 365 days
// = MAX_YEAR * 365 + (# of leap years from 0001 to MAX_YEAR) + 365 days
// (the leap-year count is the standard /4 - /100 + /400 Gregorian rule)
#[cfg(test)] // only used for testing
const MAX_DAYS_FROM_YEAR_0: i32 = MAX_YEAR * 365 +
                                  MAX_YEAR / 4 -
                                  MAX_YEAR / 100 +
                                  MAX_YEAR / 400 + 365;
// Number of days from 0000-01-01 to MIN_YEAR-01-01 (negative), derived by
// shifting MIN_YEAR into positive territory by a whole number of 400-year
// cycles (each exactly 146097 days) and subtracting them back out:
// MIN_YEAR-01-01 minus 0000-01-01
// = (MIN_YEAR+400n+1)-01-01 minus (400n+1)-01-01
// = ((MIN_YEAR+400n+1)-01-01 minus 0001-01-01) - ((400n+1)-01-01 minus 0001-01-01)
// = ((MIN_YEAR+400n+1)-01-01 minus 0001-01-01) - 146097n days
//
// n is set to 1000 for convenience.
#[cfg(test)] // only used for testing
const MIN_DAYS_FROM_YEAR_0: i32 = (MIN_YEAR + 400_000) * 365 +
                                  (MIN_YEAR + 400_000) / 4 -
                                  (MIN_YEAR + 400_000) / 100 +
                                  (MIN_YEAR + 400_000) / 400 - 146097_000;
// Number of bits needed to store a day-ordinal within the supported range.
#[cfg(test)] // only used for testing, but duplicated in naive::datetime
const MAX_BITS: usize = 44;
/// ISO 8601 calendar date without timezone.
/// Allows for every [proleptic Gregorian date](#calendar-date)
/// from Jan 1, 262145 BCE to Dec 31, 262143 CE.
/// Also supports the conversion from ISO 8601 ordinal and week date.
///
/// # Calendar Date
///
/// The ISO 8601 **calendar date** follows the proleptic Gregorian calendar.
/// It is like a normal civil calendar but note some slight differences:
///
/// * Dates before the Gregorian calendar's inception in 1582 are defined via the extrapolation.
/// Be careful, as historical dates are often noted in the Julian calendar and others
/// and the transition to Gregorian may differ across countries (as late as early 20C).
///
/// (Some example: Both Shakespeare from Britain and Cervantes from Spain seemingly died
/// on the same calendar date---April 23, 1616---but in the different calendar.
/// Britain used the Julian calendar at that time, so Shakespeare's death is later.)
///
/// * ISO 8601 calendars has the year 0, which is 1 BCE (a year before 1 CE).
/// If you need a typical BCE/BC and CE/AD notation for year numbers,
/// use the [`Datelike::year_ce`](../trait.Datelike.html#method.year_ce) method.
///
/// # Week Date
///
/// The ISO 8601 **week date** is a triple of year number, week number
/// and [day of the week](../enum.Weekday.html) with the following rules:
///
/// * A week consists of Monday through Sunday, and is always numbered within some year.
/// The week number ranges from 1 to 52 or 53 depending on the year.
///
/// * The week 1 of given year is defined as the first week containing January 4 of that year,
/// or equivalently, the first week containing four or more days in that year.
///
/// * The year number in the week date may *not* correspond to the actual Gregorian year.
/// For example, January 3, 2016 (Sunday) was on the last (53rd) week of 2015.
///
/// Chrono's date types default to the ISO 8601 [calendar date](#calendar-date),
/// but [`Datelike::iso_week`](../trait.Datelike.html#tymethod.iso_week) and
/// [`Datelike::weekday`](../trait.Datelike.html#tymethod.weekday) methods
/// can be used to get the corresponding week date.
///
/// # Ordinal Date
///
/// The ISO 8601 **ordinal date** is a pair of year number and day of the year ("ordinal").
/// The ordinal number ranges from 1 to 365 or 366 depending on the year.
/// The year number is same to that of the [calendar date](#calendar-date).
///
/// This is currently the internal format of Chrono's date types.
#[derive(PartialEq, Eq, Hash, PartialOrd, Ord, Copy, Clone)]
pub struct NaiveDate {
    // Packed representation: (year << 13) | of, where `of` is the 13-bit
    // ordinal-flags value (`internals::Of`).
    ymdf: DateImpl, // (year << 13) | of
}
/// The minimum possible `NaiveDate` (January 1, 262145 BCE).
// Packed as (year << 13) | (ordinal << 4) | flags; the trailing octal nibble is
// presumably the year-flags value for that year — verified at run time by
// `test_date_bounds` rather than by hand.
pub const MIN_DATE: NaiveDate = NaiveDate { ymdf: (MIN_YEAR << 13) | (1 << 4) | 0o07 /*FE*/ };
/// The maximum possible `NaiveDate` (December 31, 262143 CE).
pub const MAX_DATE: NaiveDate = NaiveDate { ymdf: (MAX_YEAR << 13) | (365 << 4) | 0o17 /*F*/ };
// The year flags baked into `MIN_DATE` and `MAX_DATE` are hard to verify by
// hand, so compare them against the ordinary constructors at run time.
#[test]
fn test_date_bounds() {
    let expected_min = NaiveDate::from_ymd(MIN_YEAR, 1, 1);
    let expected_max = NaiveDate::from_ymd(MAX_YEAR, 12, 31);
    assert!(MIN_DATE == expected_min,
            "`MIN_DATE` should have a year flag {:?}", expected_min.of().flags());
    assert!(MAX_DATE == expected_max,
            "`MAX_DATE` should have a year flag {:?}", expected_max.of().flags());

    // The whole date range plus one extra day (to cover `DateTime`) must fit
    // within 2^MAX_BITS seconds; this bound is used to keep `Duration`
    // arithmetic from overflowing.
    let total_secs = MAX_DATE.signed_duration_since(MIN_DATE).num_seconds() + 86401;
    assert!(total_secs < (1 << MAX_BITS),
            "The entire `NaiveDate` range somehow exceeds 2^{} seconds", MAX_BITS);
}
impl NaiveDate {
/// Makes a new `NaiveDate` from year and packed ordinal-flags, with a verification.
fn from_of(year: i32, of: Of) -> Option<NaiveDate> {
if year >= MIN_YEAR && year <= MAX_YEAR && of.valid() {
let Of(of) = of;
Some(NaiveDate { ymdf: (year << 13) | (of as DateImpl) })
} else {
None
}
}
/// Makes a new `NaiveDate` from year and packed month-day-flags, with a verification.
fn from_mdf(year: i32, mdf: Mdf) -> Option<NaiveDate> {
NaiveDate::from_of(year, mdf.to_of())
}
/// Makes a new `NaiveDate` from the [calendar date](#calendar-date)
/// (year, month and day).
///
/// Panics on the out-of-range date, invalid month and/or day.
///
/// # Example
///
/// ~~~~
/// use chrono::{NaiveDate, Datelike, Weekday};
///
/// let d = NaiveDate::from_ymd(2015, 3, 14);
/// assert_eq!(d.year(), 2015);
/// assert_eq!(d.month(), 3);
/// assert_eq!(d.day(), 14);
/// assert_eq!(d.ordinal(), 73); // day of year
/// assert_eq!(d.iso_week().year(), 2015);
/// assert_eq!(d.iso_week().week(), 11);
/// assert_eq!(d.weekday(), Weekday::Sat);
/// assert_eq!(d.num_days_from_ce(), 735671); // days since January 1, 1 CE
/// ~~~~
pub fn from_ymd(year: i32, month: u32, day: u32) -> NaiveDate {
NaiveDate::from_ymd_opt(year, month, day).expect("invalid or out-of-range date")
}
/// Makes a new `NaiveDate` from the [calendar date](#calendar-date)
/// (year, month and day).
///
/// Returns `None` on the out-of-range date, invalid month and/or day.
///
/// # Example
///
/// ~~~~
/// use chrono::NaiveDate;
///
/// let from_ymd_opt = NaiveDate::from_ymd_opt;
///
/// assert!(from_ymd_opt(2015, 3, 14).is_some());
/// assert!(from_ymd_opt(2015, 0, 14).is_none());
/// assert!(from_ymd_opt(2015, 2, 29).is_none());
/// assert!(from_ymd_opt(-4, 2, 29).is_some()); // 5 BCE is a leap year
/// assert!(from_ymd_opt(400000, 1, 1).is_none());
/// assert!(from_ymd_opt(-400000, 1, 1).is_none());
/// ~~~~
pub fn from_ymd_opt(year: i32, month: u32, day: u32) -> Option<NaiveDate> {
let flags = YearFlags::from_year(year);
NaiveDate::from_mdf(year, Mdf::new(month, day, flags))
}
/// Makes a new `NaiveDate` from the [ordinal date](#ordinal-date)
/// (year and day of the year).
///
/// Panics on the out-of-range date and/or invalid day of year.
///
/// # Example
///
/// ~~~~
/// use chrono::{NaiveDate, Datelike, Weekday};
///
/// let d = NaiveDate::from_yo(2015, 73);
/// assert_eq!(d.ordinal(), 73);
/// assert_eq!(d.year(), 2015);
/// assert_eq!(d.month(), 3);
/// assert_eq!(d.day(), 14);
/// assert_eq!(d.iso_week().year(), 2015);
/// assert_eq!(d.iso_week().week(), 11);
/// assert_eq!(d.weekday(), Weekday::Sat);
/// assert_eq!(d.num_days_from_ce(), 735671); // days since January 1, 1 CE
/// ~~~~
pub fn from_yo(year: i32, ordinal: u32) -> NaiveDate {
NaiveDate::from_yo_opt(year, ordinal).expect("invalid or out-of-range date")
}
/// Makes a new `NaiveDate` from the [ordinal date](#ordinal-date)
/// (year and day of the year).
///
/// Returns `None` on the out-of-range date and/or invalid day of year.
///
/// # Example
///
/// ~~~~
/// use chrono::NaiveDate;
///
/// let from_yo_opt = NaiveDate::from_yo_opt;
///
/// assert!(from_yo_opt(2015, 100).is_some());
/// assert!(from_yo_opt(2015, 0).is_none());
/// assert!(from_yo_opt(2015, 365).is_some());
/// assert!(from_yo_opt(2015, 366).is_none());
/// assert!(from_yo_opt(-4, 366).is_some()); // 5 BCE is a leap year
/// assert!(from_yo_opt(400000, 1).is_none());
/// assert!(from_yo_opt(-400000, 1).is_none());
/// ~~~~
pub fn from_yo_opt(year: i32, ordinal: u32) -> Option<NaiveDate> {
let flags = YearFlags::from_year(year);
NaiveDate::from_of(year, Of::new(ordinal, flags))
}
/// Makes a new `NaiveDate` from the [ISO week date](#week-date)
/// (year, week number and day of the week).
/// The resulting `NaiveDate` may have a different year from the input year.
///
/// Panics on the out-of-range date and/or invalid week number.
///
/// # Example
///
/// ~~~~
/// use chrono::{NaiveDate, Datelike, Weekday};
///
/// let d = NaiveDate::from_isoywd(2015, 11, Weekday::Sat);
/// assert_eq!(d.iso_week().year(), 2015);
/// assert_eq!(d.iso_week().week(), 11);
/// assert_eq!(d.weekday(), Weekday::Sat);
/// assert_eq!(d.year(), 2015);
/// assert_eq!(d.month(), 3);
/// assert_eq!(d.day(), 14);
/// assert_eq!(d.ordinal(), 73); // day of year
/// assert_eq!(d.num_days_from_ce(), 735671); // days since January 1, 1 CE
/// ~~~~
pub fn from_isoywd(year: i32, week: u32, weekday: Weekday) -> NaiveDate {
NaiveDate::from_isoywd_opt(year, week, weekday).expect("invalid or out-of-range date")
}
/// Makes a new `NaiveDate` from the [ISO week date](#week-date)
/// (year, week number and day of the week).
/// The resulting `NaiveDate` may have a different year from the input year.
///
/// Returns `None` on the out-of-range date and/or invalid week number.
///
/// # Example
///
/// ~~~~
/// use chrono::{NaiveDate, Weekday};
///
/// let from_ymd = NaiveDate::from_ymd;
/// let from_isoywd_opt = NaiveDate::from_isoywd_opt;
///
/// assert_eq!(from_isoywd_opt(2015, 0, Weekday::Sun), None);
/// assert_eq!(from_isoywd_opt(2015, 10, Weekday::Sun), Some(from_ymd(2015, 3, 8)));
/// assert_eq!(from_isoywd_opt(2015, 30, Weekday::Mon), Some(from_ymd(2015, 7, 20)));
/// assert_eq!(from_isoywd_opt(2015, 60, Weekday::Mon), None);
///
/// assert_eq!(from_isoywd_opt(400000, 10, Weekday::Fri), None);
/// assert_eq!(from_isoywd_opt(-400000, 10, Weekday::Sat), None);
/// ~~~~
///
/// The year number of ISO week date may differ from that of the calendar date.
///
/// ~~~~
/// # use chrono::{NaiveDate, Weekday};
/// # let from_ymd = NaiveDate::from_ymd;
/// # let from_isoywd_opt = NaiveDate::from_isoywd_opt;
/// // Mo Tu We Th Fr Sa Su
/// // 2014-W52 22 23 24 25 26 27 28 has 4+ days of new year,
/// // 2015-W01 29 30 31 1 2 3 4 <- so this is the first week
/// assert_eq!(from_isoywd_opt(2014, 52, Weekday::Sun), Some(from_ymd(2014, 12, 28)));
/// assert_eq!(from_isoywd_opt(2014, 53, Weekday::Mon), None);
/// assert_eq!(from_isoywd_opt(2015, 1, Weekday::Mon), Some(from_ymd(2014, 12, 29)));
///
/// // 2015-W52 21 22 23 24 25 26 27 has 4+ days of old year,
/// // 2015-W53 28 29 30 31 1 2 3 <- so this is the last week
/// // 2016-W01 4 5 6 7 8 9 10
/// assert_eq!(from_isoywd_opt(2015, 52, Weekday::Sun), Some(from_ymd(2015, 12, 27)));
/// assert_eq!(from_isoywd_opt(2015, 53, Weekday::Sun), Some(from_ymd(2016, 1, 3)));
/// assert_eq!(from_isoywd_opt(2015, 54, Weekday::Mon), None);
/// assert_eq!(from_isoywd_opt(2016, 1, Weekday::Mon), Some(from_ymd(2016, 1, 4)));
/// ~~~~
    pub fn from_isoywd_opt(year: i32, week: u32, weekday: Weekday) -> Option<NaiveDate> {
        let flags = YearFlags::from_year(year);
        // Number of ISO weeks in this year (52 or 53 per the week-date rules above).
        let nweeks = flags.nisoweeks();
        if 1 <= week && week <= nweeks {
            // Convert (week, weekday) into a day-of-year ordinal:
            // ordinal = week ordinal - delta
            let weekord = week * 7 + weekday as u32;
            let delta = flags.isoweek_delta();
            if weekord <= delta { // ordinal < 1, previous year
                // The date falls in the tail of the previous calendar year.
                let prevflags = YearFlags::from_year(year - 1);
                NaiveDate::from_of(year - 1, Of::new(weekord + prevflags.ndays() - delta,
                                                     prevflags))
            } else {
                let ordinal = weekord - delta;
                let ndays = flags.ndays();
                if ordinal <= ndays { // this year
                    NaiveDate::from_of(year, Of::new(ordinal, flags))
                } else { // ordinal > ndays, next year
                    // The date spills into the head of the next calendar year.
                    let nextflags = YearFlags::from_year(year + 1);
                    NaiveDate::from_of(year + 1, Of::new(ordinal - ndays, nextflags))
                }
            }
        } else {
            None
        }
    }
/// Makes a new `NaiveDate` from a day's number in the proleptic Gregorian calendar, with
/// January 1, 1 being day 1.
///
/// Panics if the date is out of range.
///
/// # Example
///
/// ~~~~
/// use chrono::{NaiveDate, Datelike, Weekday};
///
/// let d = NaiveDate::from_num_days_from_ce(735671);
/// assert_eq!(d.num_days_from_ce(), 735671); // days since January 1, 1 CE
/// assert_eq!(d.year(), 2015);
/// assert_eq!(d.month(), 3);
/// assert_eq!(d.day(), 14);
/// assert_eq!(d.ordinal(), 73); // day of year
/// assert_eq!(d.iso_week().year(), 2015);
/// assert_eq!(d.iso_week().week(), 11);
/// assert_eq!(d.weekday(), Weekday::Sat);
/// ~~~~
///
/// While not directly supported by Chrono,
/// it is easy to convert from the Julian day number
/// (January 1, 4713 BCE in the *Julian* calendar being Day 0)
/// to Gregorian with this method.
/// (Note that this panics when `jd` is out of range.)
///
/// ~~~~
/// use chrono::NaiveDate;
///
/// fn jd_to_date(jd: i32) -> NaiveDate {
/// // keep in mind that the Julian day number is 0-based
/// // while this method requires an 1-based number.
/// NaiveDate::from_num_days_from_ce(jd - 1721425)
/// }
///
/// // January 1, 4713 BCE in Julian = November 24, 4714 BCE in Gregorian
/// assert_eq!(jd_to_date(0), NaiveDate::from_ymd(-4713, 11, 24));
///
/// assert_eq!(jd_to_date(1721426), NaiveDate::from_ymd(1, 1, 1));
/// assert_eq!(jd_to_date(2450000), NaiveDate::from_ymd(1995, 10, 9));
/// assert_eq!(jd_to_date(2451545), NaiveDate::from_ymd(2000, 1, 1));
/// ~~~~
#[inline]
pub fn from_num_days_from_ce(days: i32) -> NaiveDate {
NaiveDate::from_num_days_from_ce_opt(days).expect("out-of-range date")
}
/// Makes a new `NaiveDate` from a day's number in the proleptic Gregorian calendar, with
/// January 1, 1 being day 1.
///
/// Returns `None` if the date is out of range.
///
/// # Example
///
/// ~~~~
/// use chrono::NaiveDate;
///
/// let from_ndays_opt = NaiveDate::from_num_days_from_ce_opt;
/// let from_ymd = NaiveDate::from_ymd;
///
/// assert_eq!(from_ndays_opt(730_000), Some(from_ymd(1999, 9, 3)));
/// assert_eq!(from_ndays_opt(1), Some(from_ymd(1, 1, 1)));
/// assert_eq!(from_ndays_opt(0), Some(from_ymd(0, 12, 31)));
/// assert_eq!(from_ndays_opt(-1), Some(from_ymd(0, 12, 30)));
/// assert_eq!(from_ndays_opt(100_000_000), None);
/// assert_eq!(from_ndays_opt(-100_000_000), None);
/// ~~~~
pub fn from_num_days_from_ce_opt(days: i32) -> Option<NaiveDate> {
let days = days + 365; // make December 31, 1 BCE equal to day 0
let (year_div_400, cycle) = div_mod_floor(days, 146_097);
let (year_mod_400, ordinal) = internals::cycle_to_yo(cycle as u32);
let flags = YearFlags::from_year_mod_400(year_mod_400 as i32);
NaiveDate::from_of(year_div_400 * 400 + year_mod_400 as i32,
Of::new(ordinal, flags))
}
/// Makes a new `NaiveDate` by counting the number of occurances of a particular day-of-week
/// since the beginning of the given month. For instance, if you want the 2nd Friday of March
/// 2017, you would use `NaiveDate::from_weekday_of_month(2017, 3, Weekday::Fri, 2)`.
///
/// # Panics
///
/// The resulting `NaiveDate` is guaranteed to be in `month`. If `n` is larger than the number
/// of `weekday` in `month` (eg. the 6th Friday of March 2017) then this function will panic.
///
/// `n` is 1-indexed. Passing `n=0` will cause a panic.
///
/// # Example
///
/// ~~~~
/// use chrono::{NaiveDate, Weekday};
///
/// let from_weekday_of_month = NaiveDate::from_weekday_of_month;
/// let from_ymd = NaiveDate::from_ymd;
///
/// assert_eq!(from_weekday_of_month(2018, 8, Weekday::Wed, 1), from_ymd(2018, 8, 1));
/// assert_eq!(from_weekday_of_month(2018, 8, Weekday::Fri, 1), from_ymd(2018, 8, 3));
/// assert_eq!(from_weekday_of_month(2018, 8, Weekday::Tue, 2), from_ymd(2018, 8, 14));
/// assert_eq!(from_weekday_of_month(2018, 8, Weekday::Fri, 4), from_ymd(2018, 8, 24));
/// assert_eq!(from_weekday_of_month(2018, 8, Weekday::Fri, 5), from_ymd(2018, 8, 31));
/// ~~~~
pub fn from_weekday_of_month(year: i32, month: u32, weekday: Weekday, n: u8) -> NaiveDate {
NaiveDate::from_weekday_of_month_opt(year, month, weekday, n).expect("out-of-range date")
}
/// Makes a new `NaiveDate` by counting the number of occurances of a particular day-of-week
/// since the beginning of the given month. For instance, if you want the 2nd Friday of March
/// 2017, you would use `NaiveDate::from_weekday_of_month(2017, 3, Weekday::Fri, 2)`. `n` is 1-indexed.
///
/// ~~~~
/// use chrono::{NaiveDate, Weekday};
/// assert_eq!(NaiveDate::from_weekday_of_month_opt(2017, 3, Weekday::Fri, 2),
/// NaiveDate::from_ymd_opt(2017, 3, 10))
/// ~~~~
///
/// Returns `None` if `n` out-of-range; ie. if `n` is larger than the number of `weekday` in
/// `month` (eg. the 6th Friday of March 2017), or if `n == 0`.
pub fn from_weekday_of_month_opt(year: i32, month: u32, weekday: Weekday, n: u8) -> Option<NaiveDate> {
if n == 0 { return None; }
let first = NaiveDate::from_ymd(year, month, 1).weekday();
let first_to_dow = (7 + weekday.number_from_monday() - first.number_from_monday()) % 7;
let day = (u32::from(n) - 1) * 7 + first_to_dow + 1;
NaiveDate::from_ymd_opt(year, month, day)
}
/// Parses a string with the specified format string and returns a new `NaiveDate`.
/// See the [`format::strftime` module](../format/strftime/index.html)
/// on the supported escape sequences.
///
/// # Example
///
/// ~~~~
/// use chrono::NaiveDate;
///
/// let parse_from_str = NaiveDate::parse_from_str;
///
/// assert_eq!(parse_from_str("2015-09-05", "%Y-%m-%d"),
/// Ok(NaiveDate::from_ymd(2015, 9, 5)));
/// assert_eq!(parse_from_str("5sep2015", "%d%b%Y"),
/// Ok(NaiveDate::from_ymd(2015, 9, 5)));
/// ~~~~
///
/// Time and offset is ignored for the purpose of parsing.
///
/// ~~~~
/// # use chrono::NaiveDate;
/// # let parse_from_str = NaiveDate::parse_from_str;
/// assert_eq!(parse_from_str("2014-5-17T12:34:56+09:30", "%Y-%m-%dT%H:%M:%S%z"),
/// Ok(NaiveDate::from_ymd(2014, 5, 17)));
/// ~~~~
///
/// Out-of-bound dates or insufficient fields are errors.
///
/// ~~~~
/// # use chrono::NaiveDate;
/// # let parse_from_str = NaiveDate::parse_from_str;
/// assert!(parse_from_str("2015/9", "%Y/%m").is_err());
/// assert!(parse_from_str("2015/9/31", "%Y/%m/%d").is_err());
/// ~~~~
///
/// All parsed fields should be consistent to each other, otherwise it's an error.
///
/// ~~~~
/// # use chrono::NaiveDate;
/// # let parse_from_str = NaiveDate::parse_from_str;
/// assert!(parse_from_str("Sat, 09 Aug 2013", "%a, %d %b %Y").is_err());
/// ~~~~
pub fn parse_from_str(s: &str, fmt: &str) -> ParseResult<NaiveDate> {
let mut parsed = Parsed::new();
parse(&mut parsed, s, StrftimeItems::new(fmt))?;
parsed.to_naive_date()
}
/// Makes a new `NaiveDateTime` from the current date and given `NaiveTime`.
///
/// # Example
///
/// ~~~~
/// use chrono::{NaiveDate, NaiveTime, NaiveDateTime};
///
/// let d = NaiveDate::from_ymd(2015, 6, 3);
/// let t = NaiveTime::from_hms_milli(12, 34, 56, 789);
///
/// let dt: NaiveDateTime = d.and_time(t);
/// assert_eq!(dt.date(), d);
/// assert_eq!(dt.time(), t);
/// ~~~~
#[inline]
pub fn and_time(&self, time: NaiveTime) -> NaiveDateTime {
NaiveDateTime::new(*self, time)
}
/// Makes a new `NaiveDateTime` from the current date, hour, minute and second.
///
/// No [leap second](./struct.NaiveTime.html#leap-second-handling) is allowed here;
/// use `NaiveDate::and_hms_*` methods with a subsecond parameter instead.
///
/// Panics on invalid hour, minute and/or second.
///
/// # Example
///
/// ~~~~
/// use chrono::{NaiveDate, NaiveDateTime, Datelike, Timelike, Weekday};
///
/// let d = NaiveDate::from_ymd(2015, 6, 3);
///
/// let dt: NaiveDateTime = d.and_hms(12, 34, 56);
/// assert_eq!(dt.year(), 2015);
/// assert_eq!(dt.weekday(), Weekday::Wed);
/// assert_eq!(dt.second(), 56);
/// ~~~~
#[inline]
pub fn and_hms(&self, hour: u32, min: u32, sec: u32) -> NaiveDateTime {
self.and_hms_opt(hour, min, sec).expect("invalid time")
}
/// Makes a new `NaiveDateTime` from the current date, hour, minute and second.
///
/// No [leap second](./struct.NaiveTime.html#leap-second-handling) is allowed here;
/// use `NaiveDate::and_hms_*_opt` methods with a subsecond parameter instead.
///
/// Returns `None` on invalid hour, minute and/or second.
///
/// # Example
///
/// ~~~~
/// use chrono::NaiveDate;
///
/// let d = NaiveDate::from_ymd(2015, 6, 3);
/// assert!(d.and_hms_opt(12, 34, 56).is_some());
/// assert!(d.and_hms_opt(12, 34, 60).is_none()); // use `and_hms_milli_opt` instead
/// assert!(d.and_hms_opt(12, 60, 56).is_none());
/// assert!(d.and_hms_opt(24, 34, 56).is_none());
/// ~~~~
#[inline]
pub fn and_hms_opt(&self, hour: u32, min: u32, sec: u32) -> Option<NaiveDateTime> {
NaiveTime::from_hms_opt(hour, min, sec).map(|time| self.and_time(time))
}
/// Makes a new `NaiveDateTime` from the current date, hour, minute, second and millisecond.
///
/// The millisecond part can exceed 1,000
/// in order to represent the [leap second](./struct.NaiveTime.html#leap-second-handling).
///
/// Panics on invalid hour, minute, second and/or millisecond.
///
/// # Example
///
/// ~~~~
/// use chrono::{NaiveDate, NaiveDateTime, Datelike, Timelike, Weekday};
///
/// let d = NaiveDate::from_ymd(2015, 6, 3);
///
/// let dt: NaiveDateTime = d.and_hms_milli(12, 34, 56, 789);
/// assert_eq!(dt.year(), 2015);
/// assert_eq!(dt.weekday(), Weekday::Wed);
/// assert_eq!(dt.second(), 56);
/// assert_eq!(dt.nanosecond(), 789_000_000);
/// ~~~~
#[inline]
pub fn and_hms_milli(&self, hour: u32, min: u32, sec: u32, milli: u32) -> NaiveDateTime {
self.and_hms_milli_opt(hour, min, sec, milli).expect("invalid time")
}
/// Makes a new `NaiveDateTime` from the current date, hour, minute, second and millisecond.
///
/// The millisecond part can exceed 1,000
/// in order to represent the [leap second](./struct.NaiveTime.html#leap-second-handling).
///
/// Returns `None` on invalid hour, minute, second and/or millisecond.
///
/// # Example
///
/// ~~~~
/// use chrono::NaiveDate;
///
/// let d = NaiveDate::from_ymd(2015, 6, 3);
/// assert!(d.and_hms_milli_opt(12, 34, 56, 789).is_some());
/// assert!(d.and_hms_milli_opt(12, 34, 59, 1_789).is_some()); // leap second
/// assert!(d.and_hms_milli_opt(12, 34, 59, 2_789).is_none());
/// assert!(d.and_hms_milli_opt(12, 34, 60, 789).is_none());
/// assert!(d.and_hms_milli_opt(12, 60, 56, 789).is_none());
/// assert!(d.and_hms_milli_opt(24, 34, 56, 789).is_none());
/// ~~~~
#[inline]
pub fn and_hms_milli_opt(&self, hour: u32, min: u32, sec: u32,
milli: u32) -> Option<NaiveDateTime> {
NaiveTime::from_hms_milli_opt(hour, min, sec, milli).map(|time| self.and_time(time))
}
/// Makes a new `NaiveDateTime` from the current date, hour, minute, second and microsecond.
///
/// The microsecond part can exceed 1,000,000
/// in order to represent the [leap second](./struct.NaiveTime.html#leap-second-handling).
///
/// Panics on invalid hour, minute, second and/or microsecond.
///
/// # Example
///
/// ~~~~
/// use chrono::{NaiveDate, NaiveDateTime, Datelike, Timelike, Weekday};
///
/// let d = NaiveDate::from_ymd(2015, 6, 3);
///
/// let dt: NaiveDateTime = d.and_hms_micro(12, 34, 56, 789_012);
/// assert_eq!(dt.year(), 2015);
/// assert_eq!(dt.weekday(), Weekday::Wed);
/// assert_eq!(dt.second(), 56);
/// assert_eq!(dt.nanosecond(), 789_012_000);
/// ~~~~
#[inline]
pub fn and_hms_micro(&self, hour: u32, min: u32, sec: u32, micro: u32) -> NaiveDateTime {
self.and_hms_micro_opt(hour, min, sec, micro).expect("invalid time")
}
/// Makes a new `NaiveDateTime` from the current date, hour, minute, second and microsecond.
///
/// The microsecond part can exceed 1,000,000
/// in order to represent the [leap second](./struct.NaiveTime.html#leap-second-handling).
///
/// Returns `None` on invalid hour, minute, second and/or microsecond.
///
/// # Example
///
/// ~~~~
/// use chrono::NaiveDate;
///
/// let d = NaiveDate::from_ymd(2015, 6, 3);
/// assert!(d.and_hms_micro_opt(12, 34, 56, 789_012).is_some());
/// assert!(d.and_hms_micro_opt(12, 34, 59, 1_789_012).is_some()); // leap second
/// assert!(d.and_hms_micro_opt(12, 34, 59, 2_789_012).is_none());
/// assert!(d.and_hms_micro_opt(12, 34, 60, 789_012).is_none());
/// assert!(d.and_hms_micro_opt(12, 60, 56, 789_012).is_none());
/// assert!(d.and_hms_micro_opt(24, 34, 56, 789_012).is_none());
/// ~~~~
#[inline]
pub fn and_hms_micro_opt(&self, hour: u32, min: u32, sec: u32,
micro: u32) -> Option<NaiveDateTime> {
NaiveTime::from_hms_micro_opt(hour, min, sec, micro).map(|time| self.and_time(time))
}
/// Makes a new `NaiveDateTime` from the current date, hour, minute, second and nanosecond.
///
/// The nanosecond part can exceed 1,000,000,000
/// in order to represent the [leap second](./struct.NaiveTime.html#leap-second-handling).
///
/// Panics on invalid hour, minute, second and/or nanosecond.
///
/// # Example
///
/// ~~~~
/// use chrono::{NaiveDate, NaiveDateTime, Datelike, Timelike, Weekday};
///
/// let d = NaiveDate::from_ymd(2015, 6, 3);
///
/// let dt: NaiveDateTime = d.and_hms_nano(12, 34, 56, 789_012_345);
/// assert_eq!(dt.year(), 2015);
/// assert_eq!(dt.weekday(), Weekday::Wed);
/// assert_eq!(dt.second(), 56);
/// assert_eq!(dt.nanosecond(), 789_012_345);
/// ~~~~
#[inline]
pub fn and_hms_nano(&self, hour: u32, min: u32, sec: u32, nano: u32) -> NaiveDateTime {
self.and_hms_nano_opt(hour, min, sec, nano).expect("invalid time")
}
/// Makes a new `NaiveDateTime` from the current date, hour, minute, second and nanosecond.
///
/// The nanosecond part can exceed 1,000,000,000
/// in order to represent the [leap second](./struct.NaiveTime.html#leap-second-handling).
///
/// Returns `None` on invalid hour, minute, second and/or nanosecond.
///
/// # Example
///
/// ~~~~
/// use chrono::NaiveDate;
///
/// let d = NaiveDate::from_ymd(2015, 6, 3);
/// assert!(d.and_hms_nano_opt(12, 34, 56, 789_012_345).is_some());
/// assert!(d.and_hms_nano_opt(12, 34, 59, 1_789_012_345).is_some()); // leap second
/// assert!(d.and_hms_nano_opt(12, 34, 59, 2_789_012_345).is_none());
/// assert!(d.and_hms_nano_opt(12, 34, 60, 789_012_345).is_none());
/// assert!(d.and_hms_nano_opt(12, 60, 56, 789_012_345).is_none());
/// assert!(d.and_hms_nano_opt(24, 34, 56, 789_012_345).is_none());
/// ~~~~
#[inline]
pub fn and_hms_nano_opt(&self, hour: u32, min: u32, sec: u32,
nano: u32) -> Option<NaiveDateTime> {
NaiveTime::from_hms_nano_opt(hour, min, sec, nano).map(|time| self.and_time(time))
}
    /// Returns the packed month-day-flags (`Mdf`), converted from the
    /// ordinal-flags stored in `ymdf`.
    #[inline]
    fn mdf(&self) -> Mdf {
        self.of().to_mdf()
    }
/// Returns the packed ordinal-flags.
#[inline]
fn of(&self) -> Of {
Of((self.ymdf & 0b1_1111_1111_1111) as u32)
}
/// Makes a new `NaiveDate` with the packed month-day-flags changed.
///
/// Returns `None` when the resulting `NaiveDate` would be invalid.
#[inline]
fn with_mdf(&self, mdf: Mdf) -> Option<NaiveDate> {
self.with_of(mdf.to_of())
}
/// Makes a new `NaiveDate` with the packed ordinal-flags changed.
///
/// Returns `None` when the resulting `NaiveDate` would be invalid.
#[inline]
fn with_of(&self, of: Of) -> Option<NaiveDate> {
if of.valid() {
let Of(of) = of;
Some(NaiveDate { ymdf: (self.ymdf & !0b1_1111_1111_1111) | of as DateImpl })
} else {
None
}
}
/// Makes a new `NaiveDate` for the next calendar date.
///
/// Panics when `self` is the last representable date.
///
/// # Example
///
/// ~~~~
/// use chrono::NaiveDate;
///
/// assert_eq!(NaiveDate::from_ymd(2015, 6, 3).succ(), NaiveDate::from_ymd(2015, 6, 4));
/// assert_eq!(NaiveDate::from_ymd(2015, 6, 30).succ(), NaiveDate::from_ymd(2015, 7, 1));
/// assert_eq!(NaiveDate::from_ymd(2015, 12, 31).succ(), NaiveDate::from_ymd(2016, 1, 1));
/// ~~~~
#[inline]
pub fn succ(&self) -> NaiveDate {
self.succ_opt().expect("out of bound")
}
/// Makes a new `NaiveDate` for the next calendar date.
///
/// Returns `None` when `self` is the last representable date.
///
/// # Example
///
/// ~~~~
/// use chrono::NaiveDate;
/// use chrono::naive::MAX_DATE;
///
/// assert_eq!(NaiveDate::from_ymd(2015, 6, 3).succ_opt(),
/// Some(NaiveDate::from_ymd(2015, 6, 4)));
/// assert_eq!(MAX_DATE.succ_opt(), None);
/// ~~~~
#[inline]
pub fn succ_opt(&self) -> Option<NaiveDate> {
self.with_of(self.of().succ()).or_else(|| NaiveDate::from_ymd_opt(self.year() + 1, 1, 1))
}
/// Makes a new `NaiveDate` for the previous calendar date.
///
/// Panics when `self` is the first representable date.
///
/// # Example
///
/// ~~~~
/// use chrono::NaiveDate;
///
/// assert_eq!(NaiveDate::from_ymd(2015, 6, 3).pred(), NaiveDate::from_ymd(2015, 6, 2));
/// assert_eq!(NaiveDate::from_ymd(2015, 6, 1).pred(), NaiveDate::from_ymd(2015, 5, 31));
/// assert_eq!(NaiveDate::from_ymd(2015, 1, 1).pred(), NaiveDate::from_ymd(2014, 12, 31));
/// ~~~~
#[inline]
pub fn pred(&self) -> NaiveDate {
self.pred_opt().expect("out of bound")
}
/// Makes a new `NaiveDate` for the previous calendar date.
///
/// Returns `None` when `self` is the first representable date.
///
/// # Example
///
/// ~~~~
/// use chrono::NaiveDate;
/// use chrono::naive::MIN_DATE;
///
/// assert_eq!(NaiveDate::from_ymd(2015, 6, 3).pred_opt(),
/// Some(NaiveDate::from_ymd(2015, 6, 2)));
/// assert_eq!(MIN_DATE.pred_opt(), None);
/// ~~~~
#[inline]
pub fn pred_opt(&self) -> Option<NaiveDate> {
self.with_of(self.of().pred()).or_else(|| NaiveDate::from_ymd_opt(self.year() - 1, 12, 31))
}
/// Adds the `days` part of given `Duration` to the current date.
///
/// Returns `None` when it will result in overflow.
///
/// # Example
///
/// ~~~~
/// # extern crate chrono; fn main() {
/// use chrono::{NaiveDate, Duration};
/// use chrono::naive::MAX_DATE;
///
/// let d = NaiveDate::from_ymd(2015, 9, 5);
/// assert_eq!(d.checked_add_signed(Duration::days(40)),
/// Some(NaiveDate::from_ymd(2015, 10, 15)));
/// assert_eq!(d.checked_add_signed(Duration::days(-40)),
/// Some(NaiveDate::from_ymd(2015, 7, 27)));
/// assert_eq!(d.checked_add_signed(Duration::days(1_000_000_000)), None);
/// assert_eq!(d.checked_add_signed(Duration::days(-1_000_000_000)), None);
/// assert_eq!(MAX_DATE.checked_add_signed(Duration::days(1)), None);
/// # }
/// ~~~~
    pub fn checked_add_signed(self, rhs: OldDuration) -> Option<NaiveDate> {
        let year = self.year();
        // Split the year into whole 400-year Gregorian cycles plus a remainder,
        // and convert (year mod 400, ordinal) into a day index within the cycle.
        let (mut year_div_400, year_mod_400) = div_mod_floor(year, 400);
        let cycle = internals::yo_to_cycle(year_mod_400 as u32, self.of().ordinal());
        // Add the day count; `try_opt!` bails out with `None` on i32 overflow
        // or when `rhs`'s day count does not fit in an i32.
        let cycle = try_opt!((cycle as i32).checked_add(try_opt!(rhs.num_days().to_i32())))
        let (cycle_div_400y, cycle) = div_mod_floor(cycle, 146_097);
        year_div_400 += cycle_div_400y;
        // Convert the in-cycle day index back to (year mod 400, ordinal);
        // `from_of` performs the final range check.
        let (year_mod_400, ordinal) = internals::cycle_to_yo(cycle as u32);
        let flags = YearFlags::from_year_mod_400(year_mod_400 as i32);
        NaiveDate::from_of(year_div_400 * 400 + year_mod_400 as i32,
                           Of::new(ordinal, flags))
    }
/// Subtracts the `days` part of given `Duration` from the current date.
///
/// Returns `None` when it will result in overflow.
///
/// # Example
///
/// ~~~~
/// # extern crate chrono; fn main() {
/// use chrono::{NaiveDate, Duration};
/// use chrono::naive::MIN_DATE;
///
/// let d = NaiveDate::from_ymd(2015, 9, 5);
/// assert_eq!(d.checked_sub_signed(Duration::days(40)),
/// Some(NaiveDate::from_ymd(2015, 7, 27)));
/// assert_eq!(d.checked_sub_signed(Duration::days(-40)),
/// Some(NaiveDate::from_ymd(2015, 10, 15)));
/// assert_eq!(d.checked_sub_signed(Duration::days(1_000_000_000)), None);
/// assert_eq!(d.checked_sub_signed(Duration::days(-1_000_000_000)), None);
/// assert_eq!(MIN_DATE.checked_sub_signed(Duration::days(1)), None);
/// # }
/// ~~~~
    pub fn checked_sub_signed(self, rhs: OldDuration) -> Option<NaiveDate> {
        let year = self.year();
        // Mirror of `checked_add_signed`: express the date as a day index within
        // a 400-year Gregorian cycle, subtract, then convert back.
        let (mut year_div_400, year_mod_400) = div_mod_floor(year, 400);
        let cycle = internals::yo_to_cycle(year_mod_400 as u32, self.of().ordinal())
        // `try_opt!` bails out with `None` on i32 overflow or when `rhs`'s day
        // count does not fit in an i32.
        let cycle = try_opt!((cycle as i32).checked_sub(try_opt!(rhs.num_days().to_i32())));
        let (cycle_div_400y, cycle) = div_mod_floor(cycle, 146_097);
        year_div_400 += cycle_div_400y;
        let (year_mod_400, ordinal) = internals::cycle_to_yo(cycle as u32);
        let flags = YearFlags::from_year_mod_400(year_mod_400 as i32);
        // `from_of` performs the final range check.
        NaiveDate::from_of(year_div_400 * 400 + year_mod_400 as i32,
                           Of::new(ordinal, flags))
    }
/// Subtracts another `NaiveDate` from the current date.
/// Returns a `Duration` of integral numbers.
///
/// This does not overflow or underflow at all,
/// as all possible output fits in the range of `Duration`.
///
/// # Example
///
/// ~~~~
/// # extern crate chrono; fn main() {
/// use chrono::{NaiveDate, Duration};
///
/// let from_ymd = NaiveDate::from_ymd;
/// let since = NaiveDate::signed_duration_since;
///
/// assert_eq!(since(from_ymd(2014, 1, 1), from_ymd(2014, 1, 1)), Duration::zero());
/// assert_eq!(since(from_ymd(2014, 1, 1), from_ymd(2013, 12, 31)), Duration::days(1));
/// assert_eq!(since(from_ymd(2014, 1, 1), from_ymd(2014, 1, 2)), Duration::days(-1));
/// assert_eq!(since(from_ymd(2014, 1, 1), from_ymd(2013, 9, 23)), Duration::days(100));
/// assert_eq!(since(from_ymd(2014, 1, 1), from_ymd(2013, 1, 1)), Duration::days(365));
/// assert_eq!(since(from_ymd(2014, 1, 1), from_ymd(2010, 1, 1)), Duration::days(365*4 + 1));
/// assert_eq!(since(from_ymd(2014, 1, 1), from_ymd(1614, 1, 1)), Duration::days(365*400 + 97));
/// # }
/// ~~~~
pub fn signed_duration_since(self, rhs: NaiveDate) -> OldDuration {
let year1 = self.year();
let year2 = rhs.year();
let (year1_div_400, year1_mod_400) = div_mod_floor(year1, 400);
let (year2_div_400, year2_mod_400) = div_mod_floor(year2, 400);
let cycle1 = i64::from(internals::yo_to_cycle(year1_mod_400 as u32, self.of().ordinal()));
let cycle2 = i64::from(internals::yo_to_cycle(year2_mod_400 as u32, rhs.of().ordinal()));
OldDuration::days((i64::from(year1_div_400) - i64::from(year2_div_400)) * 146_097 +
(cycle1 - cycle2))
}
/// Formats the date with the specified formatting items.
/// Otherwise it is same to the ordinary `format` method.
///
/// The `Iterator` of items should be `Clone`able,
/// since the resulting `DelayedFormat` value may be formatted multiple times.
///
/// # Example
///
/// ~~~~
/// use chrono::NaiveDate;
/// use chrono::format::strftime::StrftimeItems;
///
/// let fmt = StrftimeItems::new("%Y-%m-%d");
/// let d = NaiveDate::from_ymd(2015, 9, 5);
/// assert_eq!(d.format_with_items(fmt.clone()).to_string(), "2015-09-05");
/// assert_eq!(d.format("%Y-%m-%d").to_string(), "2015-09-05");
/// ~~~~
///
/// The resulting `DelayedFormat` can be formatted directly via the `Display` trait.
///
/// ~~~~
/// # use chrono::NaiveDate;
/// # use chrono::format::strftime::StrftimeItems;
/// # let fmt = StrftimeItems::new("%Y-%m-%d").clone();
/// # let d = NaiveDate::from_ymd(2015, 9, 5);
/// assert_eq!(format!("{}", d.format_with_items(fmt)), "2015-09-05");
/// ~~~~
#[cfg(any(feature = "alloc", feature = "std", test))]
#[inline]
pub fn format_with_items<'a, I, B>(&self, items: I) -> DelayedFormat<I>
where I: Iterator<Item=B> + Clone, B: Borrow<Item<'a>> {
DelayedFormat::new(Some(*self), None, items)
}
/// Formats the date with the specified format string.
/// See the [`format::strftime` module](../format/strftime/index.html)
/// on the supported escape sequences.
///
/// This returns a `DelayedFormat`,
/// which gets converted to a string only when actual formatting happens.
/// You may use the `to_string` method to get a `String`,
/// or just feed it into `print!` and other formatting macros.
/// (In this way it avoids the redundant memory allocation.)
///
/// A wrong format string does *not* issue an error immediately.
/// Rather, converting or formatting the `DelayedFormat` fails.
/// You are recommended to immediately use `DelayedFormat` for this reason.
///
/// # Example
///
/// ~~~~
/// use chrono::NaiveDate;
///
/// let d = NaiveDate::from_ymd(2015, 9, 5);
/// assert_eq!(d.format("%Y-%m-%d").to_string(), "2015-09-05");
/// assert_eq!(d.format("%A, %-d %B, %C%y").to_string(), "Saturday, 5 September, 2015");
/// ~~~~
///
/// The resulting `DelayedFormat` can be formatted directly via the `Display` trait.
///
/// ~~~~
/// # use chrono::NaiveDate;
/// # let d = NaiveDate::from_ymd(2015, 9, 5);
/// assert_eq!(format!("{}", d.format("%Y-%m-%d")), "2015-09-05");
/// assert_eq!(format!("{}", d.format("%A, %-d %B, %C%y")), "Saturday, 5 September, 2015");
/// ~~~~
#[cfg(any(feature = "alloc", feature = "std", test))]
#[inline]
pub fn format<'a>(&self, fmt: &'a str) -> DelayedFormat<StrftimeItems<'a>> {
self.format_with_items(StrftimeItems::new(fmt))
}
}
impl Datelike for NaiveDate {
    /// Returns the year number in the [calendar date](#calendar-date).
    ///
    /// # Example
    ///
    /// ~~~~
    /// use chrono::{NaiveDate, Datelike};
    ///
    /// assert_eq!(NaiveDate::from_ymd(2015, 9, 8).year(), 2015);
    /// assert_eq!(NaiveDate::from_ymd(-308, 3, 14).year(), -308); // 309 BCE
    /// ~~~~
    #[inline]
    fn year(&self) -> i32 {
        // `ymdf` packs the year into its upper bits; the low 13 bits hold the
        // packed ordinal/flags representation (see the `internals` module).
        self.ymdf >> 13
    }
    /// Returns the month number starting from 1.
    ///
    /// The return value ranges from 1 to 12.
    ///
    /// # Example
    ///
    /// ~~~~
    /// use chrono::{NaiveDate, Datelike};
    ///
    /// assert_eq!(NaiveDate::from_ymd(2015, 9, 8).month(), 9);
    /// assert_eq!(NaiveDate::from_ymd(-308, 3, 14).month(), 3);
    /// ~~~~
    #[inline]
    fn month(&self) -> u32 {
        self.mdf().month()
    }
    /// Returns the month number starting from 0.
    ///
    /// The return value ranges from 0 to 11.
    ///
    /// # Example
    ///
    /// ~~~~
    /// use chrono::{NaiveDate, Datelike};
    ///
    /// assert_eq!(NaiveDate::from_ymd(2015, 9, 8).month0(), 8);
    /// assert_eq!(NaiveDate::from_ymd(-308, 3, 14).month0(), 2);
    /// ~~~~
    #[inline]
    fn month0(&self) -> u32 {
        // Zero-based variant of `month`.
        self.mdf().month() - 1
    }
    /// Returns the day of month starting from 1.
    ///
    /// The return value ranges from 1 to 31. (The last day of month differs by months.)
    ///
    /// # Example
    ///
    /// ~~~~
    /// use chrono::{NaiveDate, Datelike};
    ///
    /// assert_eq!(NaiveDate::from_ymd(2015, 9, 8).day(), 8);
    /// assert_eq!(NaiveDate::from_ymd(-308, 3, 14).day(), 14);
    /// ~~~~
    ///
    /// Combined with [`NaiveDate::pred`](#method.pred),
    /// one can determine the number of days in a particular month.
    /// (Note that this panics when `year` is out of range.)
    ///
    /// ~~~~
    /// use chrono::{NaiveDate, Datelike};
    ///
    /// fn ndays_in_month(year: i32, month: u32) -> u32 {
    ///     // the first day of the next month...
    ///     let (y, m) = if month == 12 { (year + 1, 1) } else { (year, month + 1) };
    ///     let d = NaiveDate::from_ymd(y, m, 1);
    ///
    ///     // ...is preceded by the last day of the original month
    ///     d.pred().day()
    /// }
    ///
    /// assert_eq!(ndays_in_month(2015, 8), 31);
    /// assert_eq!(ndays_in_month(2015, 9), 30);
    /// assert_eq!(ndays_in_month(2015, 12), 31);
    /// assert_eq!(ndays_in_month(2016, 2), 29);
    /// assert_eq!(ndays_in_month(2017, 2), 28);
    /// ~~~~
    #[inline]
    fn day(&self) -> u32 {
        self.mdf().day()
    }
    /// Returns the day of month starting from 0.
    ///
    /// The return value ranges from 0 to 30. (The last day of month differs by months.)
    ///
    /// # Example
    ///
    /// ~~~~
    /// use chrono::{NaiveDate, Datelike};
    ///
    /// assert_eq!(NaiveDate::from_ymd(2015, 9, 8).day0(), 7);
    /// assert_eq!(NaiveDate::from_ymd(-308, 3, 14).day0(), 13);
    /// ~~~~
    #[inline]
    fn day0(&self) -> u32 {
        // Zero-based variant of `day`.
        self.mdf().day() - 1
    }
    /// Returns the day of year starting from 1.
    ///
    /// The return value ranges from 1 to 366. (The last day of year differs by years.)
    ///
    /// # Example
    ///
    /// ~~~~
    /// use chrono::{NaiveDate, Datelike};
    ///
    /// assert_eq!(NaiveDate::from_ymd(2015, 9, 8).ordinal(), 251);
    /// assert_eq!(NaiveDate::from_ymd(-308, 3, 14).ordinal(), 74);
    /// ~~~~
    ///
    /// Combined with [`NaiveDate::pred`](#method.pred),
    /// one can determine the number of days in a particular year.
    /// (Note that this panics when `year` is out of range.)
    ///
    /// ~~~~
    /// use chrono::{NaiveDate, Datelike};
    ///
    /// fn ndays_in_year(year: i32) -> u32 {
    ///     // the first day of the next year...
    ///     let d = NaiveDate::from_ymd(year + 1, 1, 1);
    ///
    ///     // ...is preceded by the last day of the original year
    ///     d.pred().ordinal()
    /// }
    ///
    /// assert_eq!(ndays_in_year(2015), 365);
    /// assert_eq!(ndays_in_year(2016), 366);
    /// assert_eq!(ndays_in_year(2017), 365);
    /// assert_eq!(ndays_in_year(2000), 366);
    /// assert_eq!(ndays_in_year(2100), 365);
    /// ~~~~
    #[inline]
    fn ordinal(&self) -> u32 {
        self.of().ordinal()
    }
    /// Returns the day of year starting from 0.
    ///
    /// The return value ranges from 0 to 365. (The last day of year differs by years.)
    ///
    /// # Example
    ///
    /// ~~~~
    /// use chrono::{NaiveDate, Datelike};
    ///
    /// assert_eq!(NaiveDate::from_ymd(2015, 9, 8).ordinal0(), 250);
    /// assert_eq!(NaiveDate::from_ymd(-308, 3, 14).ordinal0(), 73);
    /// ~~~~
    #[inline]
    fn ordinal0(&self) -> u32 {
        // Zero-based variant of `ordinal`.
        self.of().ordinal() - 1
    }
    /// Returns the day of week.
    ///
    /// # Example
    ///
    /// ~~~~
    /// use chrono::{NaiveDate, Datelike, Weekday};
    ///
    /// assert_eq!(NaiveDate::from_ymd(2015, 9, 8).weekday(), Weekday::Tue);
    /// assert_eq!(NaiveDate::from_ymd(-308, 3, 14).weekday(), Weekday::Fri);
    /// ~~~~
    #[inline]
    fn weekday(&self) -> Weekday {
        self.of().weekday()
    }
    #[inline]
    fn iso_week(&self) -> IsoWeek {
        // Delegates to the shared ISO-week computation on the
        // (year, ordinal/flags) representation; see the `isoweek` module.
        isoweek::iso_week_from_yof(self.year(), self.of())
    }
    /// Makes a new `NaiveDate` with the year number changed.
    ///
    /// Returns `None` when the resulting `NaiveDate` would be invalid.
    ///
    /// # Example
    ///
    /// ~~~~
    /// use chrono::{NaiveDate, Datelike};
    ///
    /// assert_eq!(NaiveDate::from_ymd(2015, 9, 8).with_year(2016),
    ///            Some(NaiveDate::from_ymd(2016, 9, 8)));
    /// assert_eq!(NaiveDate::from_ymd(2015, 9, 8).with_year(-308),
    ///            Some(NaiveDate::from_ymd(-308, 9, 8)));
    /// ~~~~
    ///
    /// A leap day (February 29) is a good example that this method can return `None`.
    ///
    /// ~~~~
    /// # use chrono::{NaiveDate, Datelike};
    /// assert!(NaiveDate::from_ymd(2016, 2, 29).with_year(2015).is_none());
    /// assert!(NaiveDate::from_ymd(2016, 2, 29).with_year(2020).is_some());
    /// ~~~~
    #[inline]
    fn with_year(&self, year: i32) -> Option<NaiveDate> {
        // we need to operate with `mdf` since we should keep the month and day number as is
        let mdf = self.mdf();
        // adjust the flags as needed
        let flags = YearFlags::from_year(year);
        let mdf = mdf.with_flags(flags);
        NaiveDate::from_mdf(year, mdf)
    }
    /// Makes a new `NaiveDate` with the month number (starting from 1) changed.
    ///
    /// Returns `None` when the resulting `NaiveDate` would be invalid.
    ///
    /// # Example
    ///
    /// ~~~~
    /// use chrono::{NaiveDate, Datelike};
    ///
    /// assert_eq!(NaiveDate::from_ymd(2015, 9, 8).with_month(10),
    ///            Some(NaiveDate::from_ymd(2015, 10, 8)));
    /// assert_eq!(NaiveDate::from_ymd(2015, 9, 8).with_month(13), None); // no month 13
    /// assert_eq!(NaiveDate::from_ymd(2015, 9, 30).with_month(2), None); // no February 30
    /// ~~~~
    #[inline]
    fn with_month(&self, month: u32) -> Option<NaiveDate> {
        self.with_mdf(self.mdf().with_month(month))
    }
    /// Makes a new `NaiveDate` with the month number (starting from 0) changed.
    ///
    /// Returns `None` when the resulting `NaiveDate` would be invalid.
    ///
    /// # Example
    ///
    /// ~~~~
    /// use chrono::{NaiveDate, Datelike};
    ///
    /// assert_eq!(NaiveDate::from_ymd(2015, 9, 8).with_month0(9),
    ///            Some(NaiveDate::from_ymd(2015, 10, 8)));
    /// assert_eq!(NaiveDate::from_ymd(2015, 9, 8).with_month0(12), None); // no month 13
    /// assert_eq!(NaiveDate::from_ymd(2015, 9, 30).with_month0(1), None); // no February 30
    /// ~~~~
    #[inline]
    fn with_month0(&self, month0: u32) -> Option<NaiveDate> {
        // Convert to the 1-based month before delegating.
        self.with_mdf(self.mdf().with_month(month0 + 1))
    }
    /// Makes a new `NaiveDate` with the day of month (starting from 1) changed.
    ///
    /// Returns `None` when the resulting `NaiveDate` would be invalid.
    ///
    /// # Example
    ///
    /// ~~~~
    /// use chrono::{NaiveDate, Datelike};
    ///
    /// assert_eq!(NaiveDate::from_ymd(2015, 9, 8).with_day(30),
    ///            Some(NaiveDate::from_ymd(2015, 9, 30)));
    /// assert_eq!(NaiveDate::from_ymd(2015, 9, 8).with_day(31),
    ///            None); // no September 31
    /// ~~~~
    #[inline]
    fn with_day(&self, day: u32) -> Option<NaiveDate> {
        self.with_mdf(self.mdf().with_day(day))
    }
    /// Makes a new `NaiveDate` with the day of month (starting from 0) changed.
    ///
    /// Returns `None` when the resulting `NaiveDate` would be invalid.
    ///
    /// # Example
    ///
    /// ~~~~
    /// use chrono::{NaiveDate, Datelike};
    ///
    /// assert_eq!(NaiveDate::from_ymd(2015, 9, 8).with_day0(29),
    ///            Some(NaiveDate::from_ymd(2015, 9, 30)));
    /// assert_eq!(NaiveDate::from_ymd(2015, 9, 8).with_day0(30),
    ///            None); // no September 31
    /// ~~~~
    #[inline]
    fn with_day0(&self, day0: u32) -> Option<NaiveDate> {
        // Convert to the 1-based day before delegating.
        self.with_mdf(self.mdf().with_day(day0 + 1))
    }
    /// Makes a new `NaiveDate` with the day of year (starting from 1) changed.
    ///
    /// Returns `None` when the resulting `NaiveDate` would be invalid.
    ///
    /// # Example
    ///
    /// ~~~~
    /// use chrono::{NaiveDate, Datelike};
    ///
    /// assert_eq!(NaiveDate::from_ymd(2015, 1, 1).with_ordinal(60),
    ///            Some(NaiveDate::from_ymd(2015, 3, 1)));
    /// assert_eq!(NaiveDate::from_ymd(2015, 1, 1).with_ordinal(366),
    ///            None); // 2015 had only 365 days
    ///
    /// assert_eq!(NaiveDate::from_ymd(2016, 1, 1).with_ordinal(60),
    ///            Some(NaiveDate::from_ymd(2016, 2, 29)));
    /// assert_eq!(NaiveDate::from_ymd(2016, 1, 1).with_ordinal(366),
    ///            Some(NaiveDate::from_ymd(2016, 12, 31)));
    /// ~~~~
    #[inline]
    fn with_ordinal(&self, ordinal: u32) -> Option<NaiveDate> {
        self.with_of(self.of().with_ordinal(ordinal))
    }
    /// Makes a new `NaiveDate` with the day of year (starting from 0) changed.
    ///
    /// Returns `None` when the resulting `NaiveDate` would be invalid.
    ///
    /// # Example
    ///
    /// ~~~~
    /// use chrono::{NaiveDate, Datelike};
    ///
    /// assert_eq!(NaiveDate::from_ymd(2015, 1, 1).with_ordinal0(59),
    ///            Some(NaiveDate::from_ymd(2015, 3, 1)));
    /// assert_eq!(NaiveDate::from_ymd(2015, 1, 1).with_ordinal0(365),
    ///            None); // 2015 had only 365 days
    ///
    /// assert_eq!(NaiveDate::from_ymd(2016, 1, 1).with_ordinal0(59),
    ///            Some(NaiveDate::from_ymd(2016, 2, 29)));
    /// assert_eq!(NaiveDate::from_ymd(2016, 1, 1).with_ordinal0(365),
    ///            Some(NaiveDate::from_ymd(2016, 12, 31)));
    /// ~~~~
    #[inline]
    fn with_ordinal0(&self, ordinal0: u32) -> Option<NaiveDate> {
        // Convert to the 1-based ordinal before delegating.
        self.with_of(self.of().with_ordinal(ordinal0 + 1))
    }
}
/// An addition of `Duration` to `NaiveDate` discards the fractional days,
/// rounding to the closest integral number of days towards `Duration::zero()`.
///
/// Panics on underflow or overflow.
/// Use [`NaiveDate::checked_add_signed`](#method.checked_add_signed) to detect that.
///
/// # Example
///
/// ~~~~
/// # extern crate chrono; fn main() {
/// use chrono::{NaiveDate, Duration};
///
/// let from_ymd = NaiveDate::from_ymd;
///
/// assert_eq!(from_ymd(2014, 1, 1) + Duration::zero(), from_ymd(2014, 1, 1));
/// assert_eq!(from_ymd(2014, 1, 1) + Duration::seconds(86399), from_ymd(2014, 1, 1));
/// assert_eq!(from_ymd(2014, 1, 1) + Duration::seconds(-86399), from_ymd(2014, 1, 1));
/// assert_eq!(from_ymd(2014, 1, 1) + Duration::days(1), from_ymd(2014, 1, 2));
/// assert_eq!(from_ymd(2014, 1, 1) + Duration::days(-1), from_ymd(2013, 12, 31));
/// assert_eq!(from_ymd(2014, 1, 1) + Duration::days(364), from_ymd(2014, 12, 31));
/// assert_eq!(from_ymd(2014, 1, 1) + Duration::days(365*4 + 1), from_ymd(2018, 1, 1));
/// assert_eq!(from_ymd(2014, 1, 1) + Duration::days(365*400 + 97), from_ymd(2414, 1, 1));
/// # }
/// ~~~~
impl Add<OldDuration> for NaiveDate {
    type Output = NaiveDate;

    #[inline]
    fn add(self, rhs: OldDuration) -> NaiveDate {
        // Delegate to the checked variant; out-of-range results panic, as
        // documented above.
        match self.checked_add_signed(rhs) {
            Some(d) => d,
            None => panic!("`NaiveDate + Duration` overflowed"),
        }
    }
}
impl AddAssign<OldDuration> for NaiveDate {
    #[inline]
    fn add_assign(&mut self, rhs: OldDuration) {
        // Reuse the panicking `Add` impl; `NaiveDate` is a small `Copy` value,
        // so reassigning through the reference is cheap.
        *self = *self + rhs;
    }
}
/// A subtraction of `Duration` from `NaiveDate` discards the fractional days,
/// rounding to the closest integral number of days towards `Duration::zero()`.
/// It is same to the addition with a negated `Duration`.
///
/// Panics on underflow or overflow.
/// Use [`NaiveDate::checked_sub_signed`](#method.checked_sub_signed) to detect that.
///
/// # Example
///
/// ~~~~
/// # extern crate chrono; fn main() {
/// use chrono::{NaiveDate, Duration};
///
/// let from_ymd = NaiveDate::from_ymd;
///
/// assert_eq!(from_ymd(2014, 1, 1) - Duration::zero(), from_ymd(2014, 1, 1));
/// assert_eq!(from_ymd(2014, 1, 1) - Duration::seconds(86399), from_ymd(2014, 1, 1));
/// assert_eq!(from_ymd(2014, 1, 1) - Duration::seconds(-86399), from_ymd(2014, 1, 1));
/// assert_eq!(from_ymd(2014, 1, 1) - Duration::days(1), from_ymd(2013, 12, 31));
/// assert_eq!(from_ymd(2014, 1, 1) - Duration::days(-1), from_ymd(2014, 1, 2));
/// assert_eq!(from_ymd(2014, 1, 1) - Duration::days(364), from_ymd(2013, 1, 2));
/// assert_eq!(from_ymd(2014, 1, 1) - Duration::days(365*4 + 1), from_ymd(2010, 1, 1));
/// assert_eq!(from_ymd(2014, 1, 1) - Duration::days(365*400 + 97), from_ymd(1614, 1, 1));
/// # }
/// ~~~~
impl Sub<OldDuration> for NaiveDate {
    type Output = NaiveDate;

    #[inline]
    fn sub(self, rhs: OldDuration) -> NaiveDate {
        // Delegate to the checked variant; out-of-range results panic, as
        // documented above.
        match self.checked_sub_signed(rhs) {
            Some(d) => d,
            None => panic!("`NaiveDate - Duration` overflowed"),
        }
    }
}
impl SubAssign<OldDuration> for NaiveDate {
    #[inline]
    fn sub_assign(&mut self, rhs: OldDuration) {
        // Reuse the panicking `Sub` impl; `NaiveDate` is a small `Copy` value,
        // so reassigning through the reference is cheap.
        *self = *self - rhs;
    }
}
/// Subtracts another `NaiveDate` from the current date.
/// Returns a `Duration` of integral numbers.
///
/// This does not overflow or underflow at all,
/// as all possible output fits in the range of `Duration`.
///
/// The implementation is a wrapper around
/// [`NaiveDate::signed_duration_since`](#method.signed_duration_since).
///
/// # Example
///
/// ~~~~
/// # extern crate chrono; fn main() {
/// use chrono::{NaiveDate, Duration};
///
/// let from_ymd = NaiveDate::from_ymd;
///
/// assert_eq!(from_ymd(2014, 1, 1) - from_ymd(2014, 1, 1), Duration::zero());
/// assert_eq!(from_ymd(2014, 1, 1) - from_ymd(2013, 12, 31), Duration::days(1));
/// assert_eq!(from_ymd(2014, 1, 1) - from_ymd(2014, 1, 2), Duration::days(-1));
/// assert_eq!(from_ymd(2014, 1, 1) - from_ymd(2013, 9, 23), Duration::days(100));
/// assert_eq!(from_ymd(2014, 1, 1) - from_ymd(2013, 1, 1), Duration::days(365));
/// assert_eq!(from_ymd(2014, 1, 1) - from_ymd(2010, 1, 1), Duration::days(365*4 + 1));
/// assert_eq!(from_ymd(2014, 1, 1) - from_ymd(1614, 1, 1), Duration::days(365*400 + 97));
/// # }
/// ~~~~
impl Sub<NaiveDate> for NaiveDate {
type Output = OldDuration;
#[inline]
fn sub(self, rhs: NaiveDate) -> OldDuration {
self.signed_duration_since(rhs)
}
}
/// The `Debug` output of the naive date `d` is same to
/// [`d.format("%Y-%m-%d")`](../format/strftime/index.html).
///
/// The string printed can be readily parsed via the `parse` method on `str`.
///
/// # Example
///
/// ~~~~
/// use chrono::NaiveDate;
///
/// assert_eq!(format!("{:?}", NaiveDate::from_ymd(2015, 9, 5)), "2015-09-05");
/// assert_eq!(format!("{:?}", NaiveDate::from_ymd( 0, 1, 1)), "0000-01-01");
/// assert_eq!(format!("{:?}", NaiveDate::from_ymd(9999, 12, 31)), "9999-12-31");
/// ~~~~
///
/// ISO 8601 requires an explicit sign for years before 1 BCE or after 9999 CE.
///
/// ~~~~
/// # use chrono::NaiveDate;
/// assert_eq!(format!("{:?}", NaiveDate::from_ymd( -1, 1, 1)), "-0001-01-01");
/// assert_eq!(format!("{:?}", NaiveDate::from_ymd(10000, 12, 31)), "+10000-12-31");
/// ~~~~
impl fmt::Debug for NaiveDate {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let year = self.year();
        let mdf = self.mdf();
        if year < 0 || year > 9999 {
            // ISO 8601 requires the explicit sign for out-of-range years.
            write!(f, "{:+05}-{:02}-{:02}", year, mdf.month(), mdf.day())
        } else {
            // Four-digit, zero-padded year without a sign.
            write!(f, "{:04}-{:02}-{:02}", year, mdf.month(), mdf.day())
        }
    }
}
/// The `Display` output of the naive date `d` is same to
/// [`d.format("%Y-%m-%d")`](../format/strftime/index.html).
///
/// The string printed can be readily parsed via the `parse` method on `str`.
///
/// # Example
///
/// ~~~~
/// use chrono::NaiveDate;
///
/// assert_eq!(format!("{}", NaiveDate::from_ymd(2015, 9, 5)), "2015-09-05");
/// assert_eq!(format!("{}", NaiveDate::from_ymd( 0, 1, 1)), "0000-01-01");
/// assert_eq!(format!("{}", NaiveDate::from_ymd(9999, 12, 31)), "9999-12-31");
/// ~~~~
///
/// ISO 8601 requires an explicit sign for years before 1 BCE or after 9999 CE.
///
/// ~~~~
/// # use chrono::NaiveDate;
/// assert_eq!(format!("{}", NaiveDate::from_ymd( -1, 1, 1)), "-0001-01-01");
/// assert_eq!(format!("{}", NaiveDate::from_ymd(10000, 12, 31)), "+10000-12-31");
/// ~~~~
impl fmt::Display for NaiveDate {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // `Display` shares the ISO 8601 rendering implemented for `Debug`;
        // forward the formatter so both produce identical output.
        <NaiveDate as fmt::Debug>::fmt(self, f)
    }
}
/// Parsing a `str` into a `NaiveDate` uses the same format,
/// [`%Y-%m-%d`](../format/strftime/index.html), as in `Debug` and `Display`.
///
/// # Example
///
/// ~~~~
/// use chrono::NaiveDate;
///
/// let d = NaiveDate::from_ymd(2015, 9, 18);
/// assert_eq!("2015-09-18".parse::<NaiveDate>(), Ok(d));
///
/// let d = NaiveDate::from_ymd(12345, 6, 7);
/// assert_eq!("+12345-6-7".parse::<NaiveDate>(), Ok(d));
///
/// assert!("foo".parse::<NaiveDate>().is_err());
/// ~~~~
impl str::FromStr for NaiveDate {
    type Err = ParseError;

    fn from_str(s: &str) -> ParseResult<NaiveDate> {
        // The `%Y-%m-%d` pattern, expressed as pre-parsed format items.
        const ITEMS: &'static [Item<'static>] = &[
            Item::Numeric(Numeric::Year, Pad::Zero),
            Item::Space(""),
            Item::Literal("-"),
            Item::Numeric(Numeric::Month, Pad::Zero),
            Item::Space(""),
            Item::Literal("-"),
            Item::Numeric(Numeric::Day, Pad::Zero),
            Item::Space(""),
        ];

        let mut fields = Parsed::new();
        parse(&mut fields, s, ITEMS.iter())?;
        fields.to_naive_date()
    }
}
#[cfg(all(test, any(feature = "rustc-serialize", feature = "serde")))]
fn test_encodable_json<F, E>(to_string: F)
    where F: Fn(&NaiveDate) -> Result<String, E>, E: ::std::fmt::Debug
{
    // Every date must serialize to its quoted ISO 8601 form, including the
    // sign for years outside 0000..=9999.
    let cases = [
        (NaiveDate::from_ymd(2014, 7, 24), r#""2014-07-24""#),
        (NaiveDate::from_ymd(0, 1, 1), r#""0000-01-01""#),
        (NaiveDate::from_ymd(-1, 12, 31), r#""-0001-12-31""#),
        (MIN_DATE, r#""-262144-01-01""#),
        (MAX_DATE, r#""+262143-12-31""#),
    ];
    for &(date, expected) in cases.iter() {
        assert_eq!(to_string(&date).ok(), Some(expected.into()));
    }
}
#[cfg(all(test, any(feature = "rustc-serialize", feature = "serde")))]
fn test_decodable_json<F, E>(from_str: F)
    where F: Fn(&str) -> Result<NaiveDate, E>, E: ::std::fmt::Debug
{
    use std::{i32, i64};

    // Well-formed inputs: quoted ISO 8601 dates, with or without zero
    // padding and with an optional leading sign.
    let good = [
        (r#""2016-07-08""#, NaiveDate::from_ymd(2016, 7, 8)),
        (r#""2016-7-8""#, NaiveDate::from_ymd(2016, 7, 8)),
        (r#""+002016-07-08""#, NaiveDate::from_ymd(2016, 7, 8)),
        (r#""0000-01-01""#, NaiveDate::from_ymd(0, 1, 1)),
        (r#""0-1-1""#, NaiveDate::from_ymd(0, 1, 1)),
        (r#""-0001-12-31""#, NaiveDate::from_ymd(-1, 12, 31)),
        (r#""-262144-01-01""#, MIN_DATE),
        (r#""+262143-12-31""#, MAX_DATE),
    ];
    for &(input, expected) in good.iter() {
        assert_eq!(from_str(input).ok(), Some(expected));
    }

    // Malformed inputs must be rejected: missing separators, invalid
    // calendar dates, non-string JSON values.
    let bad = [
        r#""""#,
        r#""20001231""#,
        r#""2000-00-00""#,
        r#""2000-02-30""#,
        r#""2001-02-29""#,
        r#""2002-002-28""#,
        r#""yyyy-mm-dd""#,
        r#"0"#,
        r#"20.01"#,
    ];
    for &input in bad.iter() {
        assert!(from_str(input).is_err());
    }
    assert!(from_str(&i32::MIN.to_string()).is_err());
    assert!(from_str(&i32::MAX.to_string()).is_err());
    assert!(from_str(&i64::MIN.to_string()).is_err());
    assert!(from_str(&i64::MAX.to_string()).is_err());
    assert!(from_str(r#"{}"#).is_err());
    // pre-0.3.0 rustc-serialize format is now invalid
    assert!(from_str(r#"{"ymdf":20}"#).is_err());
    assert!(from_str(r#"null"#).is_err());
}
#[cfg(feature = "rustc-serialize")]
mod rustc_serialize {
    use super::NaiveDate;
    use rustc_serialize::{Encodable, Encoder, Decodable, Decoder};
    // Serializes as the ISO 8601 string produced by the `Debug` impl,
    // e.g. `"2015-09-05"`.
    impl Encodable for NaiveDate {
        fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
            format!("{:?}", self).encode(s)
        }
    }
    // Deserializes by parsing the same string form back via `FromStr`;
    // the original parse error is discarded in favor of a generic message.
    impl Decodable for NaiveDate {
        fn decode<D: Decoder>(d: &mut D) -> Result<NaiveDate, D::Error> {
            d.read_str()?.parse().map_err(|_| d.error("invalid date"))
        }
    }
    #[cfg(test)] use rustc_serialize::json;
    #[test]
    fn test_encodable() {
        super::test_encodable_json(json::encode);
    }
    #[test]
    fn test_decodable() {
        super::test_decodable_json(json::decode);
    }
}
#[cfg(feature = "serde")]
mod serde {
    use core::fmt;
    use super::NaiveDate;
    use serdelib::{ser, de};
    // TODO not very optimized for space (binary formats would want something better)
    impl ser::Serialize for NaiveDate {
        fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
            where S: ser::Serializer
        {
            // Adapter exposing a `Debug` value through `Display`, so that
            // `collect_str` can serialize the ISO 8601 form without building
            // an intermediate `String`.
            struct FormatWrapped<'a, D: 'a> {
                inner: &'a D
            }
            impl<'a, D: fmt::Debug> fmt::Display for FormatWrapped<'a, D> {
                fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                    self.inner.fmt(f)
                }
            }
            serializer.collect_str(&FormatWrapped { inner: &self })
        }
    }
    // Visitor that parses the string form back via `FromStr`.
    struct NaiveDateVisitor;
    impl<'de> de::Visitor<'de> for NaiveDateVisitor {
        type Value = NaiveDate;
        fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result
        {
            write!(formatter, "a formatted date string")
        }
        // NOTE(review): the two cfg-gated `visit_str` bodies below are
        // currently identical; presumably they are kept separate so the
        // no-std arm can diverge — confirm before merging them.
        #[cfg(any(feature = "std", test))]
        fn visit_str<E>(self, value: &str) -> Result<NaiveDate, E>
            where E: de::Error
        {
            value.parse().map_err(E::custom)
        }
        #[cfg(not(any(feature = "std", test)))]
        fn visit_str<E>(self, value: &str) -> Result<NaiveDate, E>
            where E: de::Error
        {
            value.parse().map_err(E::custom)
        }
    }
    impl<'de> de::Deserialize<'de> for NaiveDate {
        fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
            where D: de::Deserializer<'de>
        {
            deserializer.deserialize_str(NaiveDateVisitor)
        }
    }
    #[cfg(test)] extern crate serde_json;
    #[cfg(test)] extern crate bincode;
    #[test]
    fn test_serde_serialize() {
        super::test_encodable_json(self::serde_json::to_string);
    }
    #[test]
    fn test_serde_deserialize() {
        super::test_decodable_json(|input| self::serde_json::from_str(&input));
    }
    #[test]
    fn test_serde_bincode() {
        // Bincode is relevant to test separately from JSON because
        // it is not self-describing.
        use self::bincode::{Infinite, serialize, deserialize};
        let d = NaiveDate::from_ymd(2014, 7, 24);
        let encoded = serialize(&d, Infinite).unwrap();
        let decoded: NaiveDate = deserialize(&encoded).unwrap();
        assert_eq!(d, decoded);
    }
}
#[cfg(test)]
mod tests {
use super::NaiveDate;
use super::{MIN_DATE, MIN_YEAR, MIN_DAYS_FROM_YEAR_0};
use super::{MAX_DATE, MAX_YEAR, MAX_DAYS_FROM_YEAR_0};
use {Datelike, Weekday};
use std::{i32, u32};
use oldtime::Duration;
#[test]
fn test_date_from_ymd() {
let ymd_opt = |y,m,d| NaiveDate::from_ymd_opt(y, m, d);
assert!(ymd_opt(2012, 0, 1).is_none());
assert!(ymd_opt(2012, 1, 1).is_some());
assert!(ymd_opt(2012, 2, 29).is_some());
assert!(ymd_opt(2014, 2, 29).is_none());
assert!(ymd_opt(2014, 3, 0).is_none());
assert!(ymd_opt(2014, 3, 1).is_some());
assert!(ymd_opt(2014, 3, 31).is_some());
assert!(ymd_opt(2014, 3, 32).is_none());
assert!(ymd_opt(2014, 12, 31).is_some());
assert!(ymd_opt(2014, 13, 1).is_none());
}
#[test]
fn test_date_from_yo() {
let yo_opt = |y,o| NaiveDate::from_yo_opt(y, o);
let ymd = |y,m,d| NaiveDate::from_ymd(y, m, d);
assert_eq!(yo_opt(2012, 0), None);
assert_eq!(yo_opt(2012, 1), Some(ymd(2012, 1, 1)));
assert_eq!(yo_opt(2012, 2), Some(ymd(2012, 1, 2)));
assert_eq!(yo_opt(2012, 32), Some(ymd(2012, 2, 1)));
assert_eq!(yo_opt(2012, 60), Some(ymd(2012, 2, 29)));
assert_eq!(yo_opt(2012, 61), Some(ymd(2012, 3, 1)));
assert_eq!(yo_opt(2012, 100), Some(ymd(2012, 4, 9)));
assert_eq!(yo_opt(2012, 200), Some(ymd(2012, 7, 18)));
assert_eq!(yo_opt(2012, 300), Some(ymd(2012, 10, 26)));
assert_eq!(yo_opt(2012, 366), Some(ymd(2012, 12, 31)));
assert_eq!(yo_opt(2012, 367), None);
assert_eq!(yo_opt(2014, 0), None);
assert_eq!(yo_opt(2014, 1), Some(ymd(2014, 1, 1)));
assert_eq!(yo_opt(2014, 2), Some(ymd(2014, 1, 2)));
assert_eq!(yo_opt(2014, 32), Some(ymd(2014, 2, 1)));
assert_eq!(yo_opt(2014, 59), Some(ymd(2014, 2, 28)));
assert_eq!(yo_opt(2014, 60), Some(ymd(2014, 3, 1)));
assert_eq!(yo_opt(2014, 100), Some(ymd(2014, 4, 10)));
assert_eq!(yo_opt(2014, 200), Some(ymd(2014, 7, 19)));
assert_eq!(yo_opt(2014, 300), Some(ymd(2014, 10, 27)));
assert_eq!(yo_opt(2014, 365), Some(ymd(2014, 12, 31)));
assert_eq!(yo_opt(2014, 366), None);
}
#[test]
fn test_date_from_isoywd() {
let isoywd_opt = |y,w,d| NaiveDate::from_isoywd_opt(y, w, d);
let ymd = |y,m,d| NaiveDate::from_ymd(y, m, d);
assert_eq!(isoywd_opt(2004, 0, Weekday::Sun), None);
assert_eq!(isoywd_opt(2004, 1, Weekday::Mon), Some(ymd(2003, 12, 29)));
assert_eq!(isoywd_opt(2004, 1, Weekday::Sun), Some(ymd(2004, 1, 4)));
assert_eq!(isoywd_opt(2004, 2, Weekday::Mon), Some(ymd(2004, 1, 5)));
assert_eq!(isoywd_opt(2004, 2, Weekday::Sun), Some(ymd(2004, 1, 11)));
assert_eq!(isoywd_opt(2004, 52, Weekday::Mon), Some(ymd(2004, 12, 20)));
assert_eq!(isoywd_opt(2004, 52, Weekday::Sun), Some(ymd(2004, 12, 26)));
assert_eq!(isoywd_opt(2004, 53, Weekday::Mon), Some(ymd(2004, 12, 27)));
assert_eq!(isoywd_opt(2004, 53, Weekday::Sun), Some(ymd(2005, 1, 2)));
assert_eq!(isoywd_opt(2004, 54, Weekday::Mon), None);
assert_eq!(isoywd_opt(2011, 0, Weekday::Sun), None);
assert_eq!(isoywd_opt(2011, 1, Weekday::Mon), Some(ymd(2011, 1, 3)));
assert_eq!(isoywd_opt(2011, 1, Weekday::Sun), Some(ymd(2011, 1, 9)));
assert_eq!(isoywd_opt(2011, 2, Weekday::Mon), Some(ymd(2011, 1, 10)));
assert_eq!(isoywd_opt(2011, 2, Weekday::Sun), Some(ymd(2011, 1, 16)));
assert_eq!(isoywd_opt(2018, 51, Weekday::Mon), Some(ymd(2018, 12, 17)));
assert_eq!(isoywd_opt(2018, 51, Weekday::Sun), Some(ymd(2018, 12, 23)));
assert_eq!(isoywd_opt(2018, 52, Weekday::Mon), Some(ymd(2018, 12, 24)));
assert_eq!(isoywd_opt(2018, 52, Weekday::Sun), Some(ymd(2018, 12, 30)));
assert_eq!(isoywd_opt(2018, 53, Weekday::Mon), None);
}
#[test]
fn test_date_from_isoywd_and_iso_week() {
for year in 2000..2401 {
for week in 1..54 {
for &weekday in [Weekday::Mon, Weekday::Tue, Weekday::Wed, Weekday::Thu,
Weekday::Fri, Weekday::Sat, Weekday::Sun].iter() {
let d = NaiveDate::from_isoywd_opt(year, week, weekday);
if d.is_some() {
let d = d.unwrap();
assert_eq!(d.weekday(), weekday);
let w = d.iso_week();
assert_eq!(w.year(), year);
assert_eq!(w.week(), week);
}
}
}
}
for year in 2000..2401 {
for month in 1..13 {
for day in 1..32 {
let d = NaiveDate::from_ymd_opt(year, month, day);
if d.is_some() {
let d = d.unwrap();
let w = d.iso_week();
let d_ = NaiveDate::from_isoywd(w.year(), w.week(), d.weekday());
assert_eq!(d, d_);
}
}
}
}
}
#[test]
fn test_date_from_num_days_from_ce() {
let from_ndays_from_ce = |days| NaiveDate::from_num_days_from_ce_opt(days);
assert_eq!(from_ndays_from_ce(1), Some(NaiveDate::from_ymd(1, 1, 1)));
assert_eq!(from_ndays_from_ce(2), Some(NaiveDate::from_ymd(1, 1, 2)));
assert_eq!(from_ndays_from_ce(31), Some(NaiveDate::from_ymd(1, 1, 31)));
assert_eq!(from_ndays_from_ce(32), Some(NaiveDate::from_ymd(1, 2, 1)));
assert_eq!(from_ndays_from_ce(59), Some(NaiveDate::from_ymd(1, 2, 28)));
assert_eq!(from_ndays_from_ce(60), Some(NaiveDate::from_ymd(1, 3, 1)));
assert_eq!(from_ndays_from_ce(365), Some(NaiveDate::from_ymd(1, 12, 31)));
assert_eq!(from_ndays_from_ce(365*1 + 1), Some(NaiveDate::from_ymd(2, 1, 1)));
assert_eq!(from_ndays_from_ce(365*2 + 1), Some(NaiveDate::from_ymd(3, 1, 1)));
assert_eq!(from_ndays_from_ce(365*3 + 1), Some(NaiveDate::from_ymd(4, 1, 1)));
assert_eq!(from_ndays_from_ce(365*4 + 2), Some(NaiveDate::from_ymd(5, 1, 1)));
assert_eq!(from_ndays_from_ce(146097 + 1), Some(NaiveDate::from_ymd(401, 1, 1)));
assert_eq!(from_ndays_from_ce(146097*5 + 1), Some(NaiveDate::from_ymd(2001, 1, 1)));
assert_eq!(from_ndays_from_ce(719163), Some(NaiveDate::from_ymd(1970, 1, 1)));
assert_eq!(from_ndays_from_ce(0), Some(NaiveDate::from_ymd(0, 12, 31))); // 1 BCE
assert_eq!(from_ndays_from_ce(-365), Some(NaiveDate::from_ymd(0, 1, 1)));
assert_eq!(from_ndays_from_ce(-366), Some(NaiveDate::from_ymd(-1, 12, 31))); // 2 BCE
for days in (-9999..10001).map(|x| x * 100) {
assert_eq!(from_ndays_from_ce(days).map(|d| d.num_days_from_ce()), Some(days));
}
assert_eq!(from_ndays_from_ce(MIN_DATE.num_days_from_ce()), Some(MIN_DATE));
assert_eq!(from_ndays_from_ce(MIN_DATE.num_days_from_ce() - 1), None);
assert_eq!(from_ndays_from_ce(MAX_DATE.num_days_from_ce()), Some(MAX_DATE));
assert_eq!(from_ndays_from_ce(MAX_DATE.num_days_from_ce() + 1), None);
}
#[test]
fn test_date_from_weekday_of_month_opt() {
let ymwd = |y,m,w,n| NaiveDate::from_weekday_of_month_opt(y,m,w,n);
assert_eq!(ymwd(2018, 8, Weekday::Tue, 0), None);
assert_eq!(ymwd(2018, 8, Weekday::Wed, 1), Some(NaiveDate::from_ymd(2018, 8, 1)));
assert_eq!(ymwd(2018, 8, Weekday::Thu, 1), Some(NaiveDate::from_ymd(2018, 8, 2)));
assert_eq!(ymwd(2018, 8, Weekday::Sun, 1), Some(NaiveDate::from_ymd(2018, 8, 5)));
assert_eq!(ymwd(2018, 8, Weekday::Mon, 1), Some(NaiveDate::from_ymd(2018, 8, 6)));
assert_eq!(ymwd(2018, 8, Weekday::Tue, 1), Some(NaiveDate::from_ymd(2018, 8, 7)));
assert_eq!(ymwd(2018, 8, Weekday::Wed, 2), Some(NaiveDate::from_ymd(2018, 8, 8)));
assert_eq!(ymwd(2018, 8, Weekday::Sun, 2), Some(NaiveDate::from_ymd(2018, 8, 12)));
assert_eq!(ymwd(2018, 8, Weekday::Thu, 3), Some(NaiveDate::from_ymd(2018, 8, 16)));
assert_eq!(ymwd(2018, 8, Weekday::Thu, 4), Some(NaiveDate::from_ymd(2018, 8, 23)));
assert_eq!(ymwd(2018, 8, Weekday::Thu, 5), Some(NaiveDate::from_ymd(2018, 8, 30)));
assert_eq!(ymwd(2018, 8, Weekday::Fri, 5), Some(NaiveDate::from_ymd(2018, 8, 31)));
assert_eq!(ymwd(2018, 8, Weekday::Sat, 5), None);
}
#[test]
fn test_date_fields() {
fn check(year: i32, month: u32, day: u32, ordinal: u32) {
let d1 = NaiveDate::from_ymd(year, month, day);
assert_eq!(d1.year(), year);
assert_eq!(d1.month(), month);
assert_eq!(d1.day(), day);
assert_eq!(d1.ordinal(), ordinal);
let d2 = NaiveDate::from_yo(year, ordinal);
assert_eq!(d2.year(), year);
assert_eq!(d2.month(), month);
assert_eq!(d2.day(), day);
assert_eq!(d2.ordinal(), ordinal);
assert_eq!(d1, d2);
}
check(2012, 1, 1, 1);
check(2012, 1, 2, 2);
check(2012, 2, 1, 32);
check(2012, 2, 29, 60);
check(2012, 3, 1, 61);
check(2012, 4, 9, 100);
check(2012, 7, 18, 200);
check(2012, 10, 26, 300);
check(2012, 12, 31, 366);
check(2014, 1, 1, 1);
check(2014, 1, 2, 2);
check(2014, 2, 1, 32);
check(2014, 2, 28, 59);
check(2014, 3, 1, 60);
check(2014, 4, 10, 100);
check(2014, 7, 19, 200);
check(2014, 10, 27, 300);
check(2014, 12, 31, 365);
}
#[test]
fn test_date_weekday() {
assert_eq!(NaiveDate::from_ymd(1582, 10, 15).weekday(), Weekday::Fri);
// May 20, 1875 = ISO 8601 reference date
assert_eq!(NaiveDate::from_ymd(1875, 5, 20).weekday(), Weekday::Thu);
assert_eq!(NaiveDate::from_ymd(2000, 1, 1).weekday(), Weekday::Sat);
}
#[test]
fn test_date_with_fields() {
let d = NaiveDate::from_ymd(2000, 2, 29);
assert_eq!(d.with_year(-400), Some(NaiveDate::from_ymd(-400, 2, 29)));
assert_eq!(d.with_year(-100), None);
assert_eq!(d.with_year(1600), Some(NaiveDate::from_ymd(1600, 2, 29)));
assert_eq!(d.with_year(1900), None);
assert_eq!(d.with_year(2000), Some(NaiveDate::from_ymd(2000, 2, 29)));
assert_eq!(d.with_year(2001), None);
assert_eq!(d.with_year(2004), Some(NaiveDate::from_ymd(2004, 2, 29)));
assert_eq!(d.with_year(i32::MAX), None);
let d = NaiveDate::from_ymd(2000, 4, 30);
assert_eq!(d.with_month(0), None);
assert_eq!(d.with_month(1), Some(NaiveDate::from_ymd(2000, 1, 30)));
assert_eq!(d.with_month(2), None);
assert_eq!(d.with_month(3), Some(NaiveDate::from_ymd(2000, 3, 30)));
assert_eq!(d.with_month(4), Some(NaiveDate::from_ymd(2000, 4, 30)));
assert_eq!(d.with_month(12), Some(NaiveDate::from_ymd(2000, 12, 30)));
assert_eq!(d.with_month(13), None);
assert_eq!(d.with_month(u32::MAX), None);
let d = NaiveDate::from_ymd(2000, 2, 8);
assert_eq!(d.with_day(0), None);
assert_eq!(d.with_day(1), Some(NaiveDate::from_ymd(2000, 2, 1)));
assert_eq!(d.with_day(29), Some(NaiveDate::from_ymd(2000, 2, 29)));
assert_eq!(d.with_day(30), None);
assert_eq!(d.with_day(u32::MAX), None);
let d = NaiveDate::from_ymd(2000, 5, 5);
assert_eq!(d.with_ordinal(0), None);
assert_eq!(d.with_ordinal(1), Some(NaiveDate::from_ymd(2000, 1, 1)));
assert_eq!(d.with_ordinal(60), Some(NaiveDate::from_ymd(2000, 2, 29)));
assert_eq!(d.with_ordinal(61), Some(NaiveDate::from_ymd(2000, 3, 1)));
assert_eq!(d.with_ordinal(366), Some(NaiveDate::from_ymd(2000, 12, 31)));
assert_eq!(d.with_ordinal(367), None);
assert_eq!(d.with_ordinal(u32::MAX), None);
}
#[test]
fn test_date_num_days_from_ce() {
assert_eq!(NaiveDate::from_ymd(1, 1, 1).num_days_from_ce(), 1);
for year in -9999..10001 {
assert_eq!(NaiveDate::from_ymd(year, 1, 1).num_days_from_ce(),
NaiveDate::from_ymd(year - 1, 12, 31).num_days_from_ce() + 1);
}
}
#[test]
fn test_date_succ() {
let ymd = |y,m,d| NaiveDate::from_ymd(y, m, d);
assert_eq!(ymd(2014, 5, 6).succ_opt(), Some(ymd(2014, 5, 7)));
assert_eq!(ymd(2014, 5, 31).succ_opt(), Some(ymd(2014, 6, 1)));
assert_eq!(ymd(2014, 12, 31).succ_opt(), Some(ymd(2015, 1, 1)));
assert_eq!(ymd(2016, 2, 28).succ_opt(), Some(ymd(2016, 2, 29)));
assert_eq!(ymd(MAX_DATE.year(), 12, 31).succ_opt(), None);
}
#[test]
fn test_date_pred() {
let ymd = |y,m,d| NaiveDate::from_ymd(y, m, d);
assert_eq!(ymd(2016, 3, 1).pred_opt(), Some(ymd(2016, 2, 29)));
assert_eq!(ymd(2015, 1, 1).pred_opt(), Some(ymd(2014, 12, 31)));
assert_eq!(ymd(2014, 6, 1).pred_opt(), Some(ymd(2014, 5, 31)));
assert_eq!(ymd(2014, 5, 7).pred_opt(), Some(ymd(2014, 5, 6)));
assert_eq!(ymd(MIN_DATE.year(), 1, 1).pred_opt(), None);
}
#[test]
fn test_date_add() {
fn check((y1,m1,d1): (i32, u32, u32), rhs: Duration, ymd: Option<(i32, u32, u32)>) {
let lhs = NaiveDate::from_ymd(y1, m1, d1);
let sum = ymd.map(|(y,m,d)| NaiveDate::from_ymd(y, m, d));
assert_eq!(lhs.checked_add_signed(rhs), sum);
assert_eq!(lhs.checked_sub_signed(-rhs), sum);
}
check((2014, 1, 1), Duration::zero(), Some((2014, 1, 1)));
check((2014, 1, 1), Duration::seconds(86399), Some((2014, 1, 1)));
// always round towards zero
check((2014, 1, 1), Duration::seconds(-86399), Some((2014, 1, 1)));
check((2014, 1, 1), Duration::days(1), Some((2014, 1, 2)));
check((2014, 1, 1), Duration::days(-1), Some((2013, 12, 31)));
check((2014, 1, 1), Duration::days(364), Some((2014, 12, 31)));
check((2014, 1, 1), Duration::days(365*4 + 1), Some((2018, 1, 1)));
check((2014, 1, 1), Duration::days(365*400 + 97), Some((2414, 1, 1)));
check((-7, 1, 1), Duration::days(365*12 + 3), Some((5, 1, 1)));
// overflow check
check((0, 1, 1), Duration::days(MAX_DAYS_FROM_YEAR_0 as i64), Some((MAX_YEAR, 12, 31)));
check((0, 1, 1), Duration::days(MAX_DAYS_FROM_YEAR_0 as i64 + 1), None);
check((0, 1, 1), Duration::max_value(), None);
check((0, 1, 1), Duration::days(MIN_DAYS_FROM_YEAR_0 as i64), Some((MIN_YEAR, 1, 1)));
check((0, 1, 1), Duration::days(MIN_DAYS_FROM_YEAR_0 as i64 - 1), None);
check((0, 1, 1), Duration::min_value(), None);
}
#[test]
fn test_date_sub() {
fn check((y1,m1,d1): (i32, u32, u32), (y2,m2,d2): (i32, u32, u32), diff: Duration) {
let lhs = NaiveDate::from_ymd(y1, m1, d1);
let rhs = NaiveDate::from_ymd(y2, m2, d2);
assert_eq!(lhs.signed_duration_since(rhs), diff);
assert_eq!(rhs.signed_duration_since(lhs), -diff);
}
check((2014, 1, 1), (2014, 1, 1), Duration::zero());
check((2014, 1, 2), (2014, 1, 1), Duration::days(1));
check((2014, 12, 31), (2014, 1, 1), Duration::days(364));
check((2015, 1, 3), (2014, 1, 1), Duration::days(365 + 2));
check((2018, 1, 1), (2014, 1, 1), Duration::days(365*4 + 1));
check((2414, 1, 1), (2014, 1, 1), Duration::days(365*400 + 97));
check((MAX_YEAR, 12, 31), (0, 1, 1), Duration::days(MAX_DAYS_FROM_YEAR_0 as i64));
check((MIN_YEAR, 1, 1), (0, 1, 1), Duration::days(MIN_DAYS_FROM_YEAR_0 as i64));
}
#[test]
fn test_date_addassignment() {
let ymd = NaiveDate::from_ymd;
let mut date = ymd(2016, 10, 1);
date += Duration::days(10);
assert_eq!(date, ymd(2016, 10, 11));
date += Duration::days(30);
assert_eq!(date, ymd(2016, 11, 10));
}
#[test]
fn test_date_subassignment() {
let ymd = NaiveDate::from_ymd;
let mut date = ymd(2016, 10, 11);
date -= Duration::days(10);
assert_eq!(date, ymd(2016, 10, 1));
date -= Duration::days(2);
assert_eq!(date, ymd(2016, 9, 29));
}
#[test]
fn test_date_fmt() {
assert_eq!(format!("{:?}", NaiveDate::from_ymd(2012, 3, 4)), "2012-03-04");
assert_eq!(format!("{:?}", NaiveDate::from_ymd(0, 3, 4)), "0000-03-04");
assert_eq!(format!("{:?}", NaiveDate::from_ymd(-307, 3, 4)), "-0307-03-04");
assert_eq!(format!("{:?}", NaiveDate::from_ymd(12345, 3, 4)), "+12345-03-04");
assert_eq!(NaiveDate::from_ymd(2012, 3, 4).to_string(), "2012-03-04");
assert_eq!(NaiveDate::from_ymd(0, 3, 4).to_string(), "0000-03-04");
assert_eq!(NaiveDate::from_ymd(-307, 3, 4).to_string(), "-0307-03-04");
assert_eq!(NaiveDate::from_ymd(12345, 3, 4).to_string(), "+12345-03-04");
// the format specifier should have no effect on `NaiveTime`
assert_eq!(format!("{:+30?}", NaiveDate::from_ymd(1234, 5, 6)), "1234-05-06");
assert_eq!(format!("{:30?}", NaiveDate::from_ymd(12345, 6, 7)), "+12345-06-07");
}
#[test]
fn test_date_from_str() {
// valid cases
let valid = [
"-0000000123456-1-2",
" -123456 - 1 - 2 ",
"-12345-1-2",
"-1234-12-31",
"-7-6-5",
"350-2-28",
"360-02-29",
"0360-02-29",
"2015-2 -18",
"+70-2-18",
"+70000-2-18",
"+00007-2-18",
];
for &s in &valid {
let d = match s.parse::<NaiveDate>() {
Ok(d) => d,
Err(e) => panic!("parsing `{}` has failed: {}", s, e)
};
let s_ = format!("{:?}", d);
// `s` and `s_` may differ, but `s.parse()` and `s_.parse()` must be same
let d_ = match s_.parse::<NaiveDate>() {
Ok(d) => d,
Err(e) => panic!("`{}` is parsed into `{:?}`, but reparsing that has failed: {}",
s, d, e)
};
assert!(d == d_, "`{}` is parsed into `{:?}`, but reparsed result \
`{:?}` does not match", s, d, d_);
}
// some invalid cases
// since `ParseErrorKind` is private, all we can do is to check if there was an error
assert!("".parse::<NaiveDate>().is_err());
assert!("x".parse::<NaiveDate>().is_err());
assert!("2014".parse::<NaiveDate>().is_err());
assert!("2014-01".parse::<NaiveDate>().is_err());
assert!("2014-01-00".parse::<NaiveDate>().is_err());
assert!("2014-13-57".parse::<NaiveDate>().is_err());
assert!("9999999-9-9".parse::<NaiveDate>().is_err()); // out-of-bounds
}
#[test]
fn test_date_parse_from_str() {
let ymd = |y,m,d| NaiveDate::from_ymd(y,m,d);
assert_eq!(NaiveDate::parse_from_str("2014-5-7T12:34:56+09:30", "%Y-%m-%dT%H:%M:%S%z"),
Ok(ymd(2014, 5, 7))); // ignore time and offset
assert_eq!(NaiveDate::parse_from_str("2015-W06-1=2015-033", "%G-W%V-%u = %Y-%j"),
Ok(ymd(2015, 2, 2)));
assert_eq!(NaiveDate::parse_from_str("Fri, 09 Aug 13", "%a, %d %b %y"),
Ok(ymd(2013, 8, 9)));
assert!(NaiveDate::parse_from_str("Sat, 09 Aug 2013", "%a, %d %b %Y").is_err());
assert!(NaiveDate::parse_from_str("2014-57", "%Y-%m-%d").is_err());
assert!(NaiveDate::parse_from_str("2014", "%Y").is_err()); // insufficient
}
#[test]
fn test_date_format() {
let d = NaiveDate::from_ymd(2012, 3, 4);
assert_eq!(d.format("%Y,%C,%y,%G,%g").to_string(), "2012,20,12,2012,12");
assert_eq!(d.format("%m,%b,%h,%B").to_string(), "03,Mar,Mar,March");
assert_eq!(d.format("%d,%e").to_string(), "04, 4");
assert_eq!(d.format("%U,%W,%V").to_string(), "10,09,09");
assert_eq!(d.format("%a,%A,%w,%u").to_string(), "Sun,Sunday,0,7");
assert_eq!(d.format("%j").to_string(), "064"); // since 2012 is a leap year
assert_eq!(d.format("%D,%x").to_string(), "03/04/12,03/04/12");
assert_eq!(d.format("%F").to_string(), "2012-03-04");
assert_eq!(d.format("%v").to_string(), " 4-Mar-2012");
assert_eq!(d.format("%t%n%%%n%t").to_string(), "\t\n%\n\t");
// non-four-digit years
assert_eq!(NaiveDate::from_ymd(12345, 1, 1).format("%Y").to_string(), "+12345");
assert_eq!(NaiveDate::from_ymd(1234, 1, 1).format("%Y").to_string(), "1234");
assert_eq!(NaiveDate::from_ymd(123, 1, 1).format("%Y").to_string(), "0123");
assert_eq!(NaiveDate::from_ymd(12, 1, 1).format("%Y").to_string(), "0012");
assert_eq!(NaiveDate::from_ymd(1, 1, 1).format("%Y").to_string(), "0001");
assert_eq!(NaiveDate::from_ymd(0, 1, 1).format("%Y").to_string(), "0000");
assert_eq!(NaiveDate::from_ymd(-1, 1, 1).format("%Y").to_string(), "-0001");
assert_eq!(NaiveDate::from_ymd(-12, 1, 1).format("%Y").to_string(), "-0012");
assert_eq!(NaiveDate::from_ymd(-123, 1, 1).format("%Y").to_string(), "-0123");
assert_eq!(NaiveDate::from_ymd(-1234, 1, 1).format("%Y").to_string(), "-1234");
assert_eq!(NaiveDate::from_ymd(-12345, 1, 1).format("%Y").to_string(), "-12345");
// corner cases
assert_eq!(NaiveDate::from_ymd(2007, 12, 31).format("%G,%g,%U,%W,%V").to_string(),
"2008,08,53,53,01");
assert_eq!(NaiveDate::from_ymd(2010, 1, 3).format("%G,%g,%U,%W,%V").to_string(),
"2009,09,01,00,53");
}
}
| 40.316485 | 108 | 0.570757 |
0e60aaa285a364ff1787bdb3a1cdd8817bb097c3
| 1,780 |
#[macro_use]
extern crate criterion;
extern crate fancy_garbling;
use criterion::Criterion;
use std::time::Duration;
use fancy_garbling::rand::Rng;
use fancy_garbling::garble::garble;
use fancy_garbling::circuit::Builder;
// Benchmarks garbling of a circuit containing a single projection gate over Z_q
// (truth table i -> (i + 1) mod q).
fn bench_projection_garble(c: &mut Criterion, q: u8) {
    c.bench_function(&format!("garbling::proj{}_gb", q), move |bench| {
        let tab: Vec<u8> = (0..q).map(|i| (i + 1) % q).collect();

        let mut builder = Builder::new();
        let x = builder.input(q);
        // A second input wire is declared but not used by the projection gate.
        let _ = builder.input(q);
        let z = builder.proj(x, q, tab);
        builder.output(z);
        let circ = builder.finish();

        bench.iter(|| {
            let (gb, _ev) = garble(&circ);
            criterion::black_box(gb);
        });
    });
}
// Benchmarks evaluation of the same single-projection-gate circuit on encoded
// random inputs; garbling happens once outside the timed loop.
fn bench_projection_eval(c: &mut Criterion, q: u8) {
    c.bench_function(&format!("garbling::proj{}_ev", q), move |bench| {
        let mut rng = Rng::new();
        let tab: Vec<u8> = (0..q).map(|i| (i + 1) % q).collect();

        let mut builder = Builder::new();
        let x = builder.input(q);
        let _ = builder.input(q);
        let z = builder.proj(x, q, tab);
        builder.output(z);
        let circ = builder.finish();

        let (gb, ev) = garble(&circ);
        let x = rng.gen_byte() % q;
        let y = rng.gen_byte() % q;
        let xs = gb.encode(&[x, y]);

        bench.iter(|| {
            let ys = ev.eval(&circ, &xs);
            criterion::black_box(ys);
        });
    });
}
// Concrete q = 17 instantiations registered as criterion targets.
fn proj17_gb(c: &mut Criterion) { bench_projection_garble(c,17) }
fn proj17_ev(c: &mut Criterion) { bench_projection_eval(c,17) }

// Criterion harness with a shortened (100 ms) warm-up time.
criterion_group!{
    name = garbling;
    config = Criterion::default().warm_up_time(Duration::from_millis(100));
    targets = proj17_gb, proj17_ev
}

criterion_main!(garbling);
| 25.797101 | 75 | 0.535393 |
fe67c065509674a5d004e9f29e16fb0cb8d84d5e
| 13,133 |
use std::convert::TryFrom;
use std::fmt;
use std::iter::{Extend, FromIterator};
use gdnative_impl_proc_macros as macros;
use crate::access::{Aligned, MaybeUnaligned};
use crate::private::get_api;
use crate::NewRef;
use crate::{Color, GodotString, VariantArray, Vector2, Vector2Godot, Vector3, Vector3Godot};
/// A reference-counted CoW typed vector using Godot's pool allocator, generic over possible
/// element types.
///
/// This type is CoW. The `Clone` implementation of this type creates a new reference without
/// copying the contents.
///
/// When using this type, it's generally better to perform mutations in batch using `write`,
/// or the `append` methods, as opposed to `push` or `set`, because the latter ones trigger
/// CoW behavior each time they are called.
pub struct TypedArray<T: Element> {
    // Raw Godot pool-array handle; owned by this wrapper and destroyed on `Drop`.
    inner: T::SysArray,
}
/// A RAII read access for Godot typed arrays. Obtained via [`TypedArray::read`].
pub type Read<'a, T> = Aligned<ReadGuard<'a, T>>;

/// A RAII write access for Godot typed arrays. This will only lock the CoW container once,
/// as opposed to every time with methods like `push`. Obtained via [`TypedArray::write`].
pub type Write<'a, T> = Aligned<WriteGuard<'a, T>>;
impl<T: Element> Drop for TypedArray<T> {
    #[inline]
    fn drop(&mut self) {
        // SAFETY: `inner` is a valid pool array initialized by one of the constructors;
        // the destroy function releases this wrapper's reference to it.
        unsafe {
            (T::destroy_fn(get_api()))(self.sys_mut());
        }
    }
}
impl<T: Element> Default for TypedArray<T> {
    /// Returns an empty array, equivalent to [`TypedArray::new`].
    #[inline]
    fn default() -> Self {
        Self::new()
    }
}
impl<T: Element + fmt::Debug> fmt::Debug for TypedArray<T> {
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Render as a list by taking a read lock and debug-printing every element.
        f.debug_list().entries(self.read().iter()).finish()
    }
}
impl<T: Element> Clone for TypedArray<T> {
    /// Creates a new reference to the same underlying array without copying elements
    /// (this type is CoW).
    #[inline]
    fn clone(&self) -> Self {
        self.new_ref()
    }
}
impl<T: Element> NewRef for TypedArray<T> {
    /// Creates a new reference to this reference-counted instance.
    #[inline]
    fn new_ref(&self) -> Self {
        // SAFETY: `new_copy_fn` initializes `inner` from the existing array. Per the
        // type-level docs this creates a new reference rather than copying the contents.
        unsafe {
            let mut inner = T::SysArray::default();
            (T::new_copy_fn(get_api()))(&mut inner, self.sys());
            TypedArray { inner }
        }
    }
}
impl<T: Element> TypedArray<T> {
    /// Creates an empty array.
    #[inline]
    pub fn new() -> Self {
        unsafe {
            let mut inner = T::SysArray::default();
            (T::new_fn(get_api()))(&mut inner);
            TypedArray { inner }
        }
    }

    /// Creates from a `VariantArray` by making a best effort to convert each variant.
    #[inline]
    pub fn from_variant_array(array: &VariantArray) -> Self {
        unsafe {
            let mut inner = T::SysArray::default();
            (T::new_with_array_fn(get_api()))(&mut inner, array.sys());
            TypedArray { inner }
        }
    }

    /// Creates a `TypedArray` moving elements from `src`.
    #[inline]
    pub fn from_vec(mut src: Vec<T>) -> Self {
        let mut arr = Self::new();
        arr.append_vec(&mut src);
        arr
    }

    /// Appends an element to the end of the array.
    ///
    /// Calling `push` triggers copy-on-write behavior. To insert a large number of elements,
    /// consider using `resize` and `write`.
    #[inline]
    pub fn push(&mut self, val: T) {
        self.push_ref(&val)
    }

    /// Appends an element to the end of the array by reference.
    ///
    /// Calling `push` triggers copy-on-write behavior. To insert a large number of elements,
    /// consider using `resize` and `write`.
    #[inline]
    pub fn push_ref(&mut self, val: &T) {
        unsafe {
            (T::append_fn(get_api()))(self.sys_mut(), T::element_to_sys_ref(val));
        }
    }

    /// Copies and appends all values in `src` to the end of the array.
    #[inline]
    pub fn append(&mut self, src: &Self) {
        unsafe {
            (T::append_array_fn(get_api()))(self.sys_mut(), src.sys());
        }
    }

    /// Moves all the elements from `src` into `self`, leaving `src` empty.
    ///
    /// # Panics
    ///
    /// If the resulting length would not fit in `i32`.
    #[inline]
    pub fn append_vec(&mut self, src: &mut Vec<T>) {
        let start = self.len() as usize;
        let new_len = start + src.len();
        self.resize(i32::try_from(new_len).expect("new length should fit in i32"));

        // Move the elements under a single write lock, so CoW is triggered at most once.
        let mut write = self.write();
        let mut drain = src.drain(..);
        for dst in &mut write[start..] {
            *dst = drain.next().unwrap();
        }

        assert!(drain.next().is_none());
    }

    /// Inserts an element at the given offset and returns `true` if successful.
    #[inline]
    pub fn insert(&mut self, offset: i32, val: T) -> bool {
        self.insert_ref(offset, &val)
    }

    /// Inserts an element by reference at the given offset and returns `true` if successful.
    #[inline]
    pub fn insert_ref(&mut self, offset: i32, val: &T) -> bool {
        unsafe {
            let status =
                (T::insert_fn(get_api()))(self.sys_mut(), offset, T::element_to_sys_ref(val));
            // BUGFIX: `GODOT_OK` signals success. The previous `!=` comparison returned
            // `true` on *failure*, inverting the documented contract.
            status == sys::godot_error_GODOT_OK
        }
    }

    /// Inverts the order of the elements in the array.
    #[inline]
    pub fn invert(&mut self) {
        unsafe { (T::invert_fn(get_api()))(self.sys_mut()) }
    }

    /// Removes an element at the given offset.
    #[inline]
    pub fn remove(&mut self, idx: i32) {
        unsafe {
            (T::remove_fn(get_api()))(self.sys_mut(), idx);
        }
    }

    /// Changes the size of the array, possibly removing elements or pushing default values.
    #[inline]
    pub fn resize(&mut self, size: i32) {
        unsafe {
            (T::resize_fn(get_api()))(self.sys_mut(), size);
        }
    }

    /// Returns a copy of the element at the given offset.
    #[inline]
    pub fn get(&self, idx: i32) -> T {
        unsafe { T::element_from_sys((T::get_fn(get_api()))(self.sys(), idx)) }
    }

    /// Sets the value of the element at the given offset.
    ///
    /// Triggers CoW on every call; prefer `write` for batched mutation.
    #[inline]
    pub fn set(&mut self, idx: i32, val: T) {
        self.set_ref(idx, &val)
    }

    /// Sets the value of the element at the given offset by reference.
    ///
    /// Triggers CoW on every call; prefer `write` for batched mutation.
    #[inline]
    pub fn set_ref(&mut self, idx: i32, val: &T) {
        unsafe {
            (T::set_fn(get_api()))(self.sys_mut(), idx, T::element_to_sys_ref(val));
        }
    }

    /// Returns the number of elements in the array.
    #[inline]
    pub fn len(&self) -> i32 {
        unsafe { (T::size_fn(get_api()))(self.sys()) }
    }

    /// Returns `true` if the container is empty.
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Returns a RAII read access into this array.
    #[inline]
    pub fn read(&self) -> Read<'_, T> {
        unsafe {
            MaybeUnaligned::new(ReadGuard::new(self.sys()))
                .try_into_aligned()
                .expect("Pool array access should be aligned. This indicates a bug in Godot")
        }
    }

    /// Returns a RAII write access into this array. This triggers CoW once per lock, instead
    /// of once each mutation.
    #[inline]
    pub fn write(&mut self) -> Write<'_, T> {
        unsafe {
            // Use `sys_mut` directly instead of casting the `*const` from `sys`; `self`
            // is already borrowed mutably here.
            MaybeUnaligned::new(WriteGuard::new(self.sys_mut()))
                .try_into_aligned()
                .expect("Pool array access should be aligned. This indicates a bug in Godot")
        }
    }

    #[doc(hidden)]
    #[inline]
    pub fn sys(&self) -> *const T::SysArray {
        &self.inner
    }

    #[doc(hidden)]
    #[inline]
    pub fn sys_mut(&mut self) -> *mut T::SysArray {
        &mut self.inner
    }

    #[doc(hidden)]
    #[inline]
    pub fn from_sys(sys: T::SysArray) -> Self {
        TypedArray { inner: sys }
    }
}
impl<T: Element + Copy> TypedArray<T> {
    /// Creates a new `TypedArray` by copying from `src`.
    ///
    /// # Panics
    ///
    /// If the length of `src` does not fit in `i32`.
    #[inline]
    pub fn from_slice(src: &[T]) -> Self {
        let mut arr = Self::new();
        arr.append_slice(src);
        arr
    }

    /// Copies and appends all values in `src` to the end of the array.
    ///
    /// # Panics
    ///
    /// If the resulting length would not fit in `i32`.
    #[inline]
    pub fn append_slice(&mut self, src: &[T]) {
        let start = self.len() as usize;
        let new_len = start + src.len();
        self.resize(i32::try_from(new_len).expect("new length should fit in i32"));

        // Copy into the freshly resized tail under a single write lock.
        let mut write = self.write();
        write[start..].copy_from_slice(src)
    }
}
// `FromIterator` and `Extend` implementations collect into `Vec` first, because Rust `Vec`s
// are better at handling unknown lengths than the Godot arrays (`push` CoWs every time!)
impl<T: Element> FromIterator<T> for TypedArray<T> {
    #[inline]
    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
        // Buffer into a `Vec` first (see the note above) so the pool array is resized
        // exactly once instead of CoW-ing on every push.
        Self::from_vec(iter.into_iter().collect())
    }
}
impl<T: Element> Extend<T> for TypedArray<T> {
    #[inline]
    fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
        // Buffer into a `Vec` first so `append_vec` can resize the pool array once.
        let mut vec = iter.into_iter().collect::<Vec<_>>();
        self.append_vec(&mut vec);
    }
}
impl<T: Element + PartialEq> PartialEq for TypedArray<T> {
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        // Cheap length check first; only take read locks on both arrays when needed.
        self.len() == other.len() && {
            let (lhs, rhs) = (self.read(), other.read());
            lhs.as_slice() == rhs.as_slice()
        }
    }
}
impl<T: Element + Eq> Eq for TypedArray<T> {}

/// RAII read guard.
pub struct ReadGuard<'a, T: Element> {
    // Raw Godot read-access handle; released in `Drop`.
    access: *mut T::SysReadAccess,
    // Element count captured when the guard was created.
    len: usize,
    _marker: std::marker::PhantomData<&'a T>,
}
impl<'a, T: Element> ReadGuard<'a, T> {
    // Safety: `arr` must point to a valid, initialized pool array that outlives `'a`.
    #[inline]
    unsafe fn new(arr: *const T::SysArray) -> Self {
        // Capture the length eagerly: the access handle alone does not carry it.
        let len = (T::size_fn(get_api()))(arr) as usize;
        let access = (T::read_fn(get_api()))(arr);

        Self {
            access,
            len,
            _marker: std::marker::PhantomData,
        }
    }
}
unsafe impl<'a, T: Element> crate::access::Guard for ReadGuard<'a, T> {
    type Target = T;

    #[inline]
    fn len(&self) -> usize {
        self.len
    }

    #[inline]
    fn read_ptr(&self) -> *const Self::Target {
        unsafe {
            // NOTE(review): the cast relies on `T::SysTy` being layout-compatible with
            // `T` — presumably an invariant of the sealed `Element` impls; confirm.
            let orig_ptr: *const T::SysTy = (T::read_access_ptr_fn(get_api()))(self.access);
            orig_ptr as *const Self::Target
        }
    }
}
impl<'a, T: Element> Drop for ReadGuard<'a, T> {
    #[inline]
    fn drop(&mut self) {
        // Release the read access handle back to Godot.
        unsafe {
            (T::read_access_destroy_fn(get_api()))(self.access);
        }
    }
}
impl<'a, T: Element> Clone for ReadGuard<'a, T> {
    #[inline]
    fn clone(&self) -> Self {
        // Duplicate the access handle via the FFI; both guards release independently.
        let access = unsafe { (T::read_access_copy_fn(get_api()))(self.access) };

        Self {
            access,
            len: self.len,
            _marker: std::marker::PhantomData,
        }
    }
}
/// RAII write guard.
pub struct WriteGuard<'a, T: Element> {
    // Raw Godot write-access handle; released in `Drop`.
    access: *mut T::SysWriteAccess,
    // Element count captured when the guard was created.
    len: usize,
    _marker: std::marker::PhantomData<&'a T>,
}
impl<'a, T: Element> WriteGuard<'a, T> {
    // Safety: `arr` must point to a valid, initialized pool array that outlives `'a`,
    // and no other access may mutate it while this guard exists.
    #[inline]
    unsafe fn new(arr: *mut T::SysArray) -> Self {
        // Capture the length eagerly: the access handle alone does not carry it.
        let len = (T::size_fn(get_api()))(arr) as usize;
        let access = (T::write_fn(get_api()))(arr);

        Self {
            access,
            len,
            _marker: std::marker::PhantomData,
        }
    }
}
unsafe impl<'a, T: Element> crate::access::Guard for WriteGuard<'a, T> {
    type Target = T;

    #[inline]
    fn len(&self) -> usize {
        self.len
    }

    #[inline]
    fn read_ptr(&self) -> *const Self::Target {
        unsafe {
            // NOTE(review): same layout-compatibility assumption as `ReadGuard::read_ptr`.
            let orig_ptr: *const T::SysTy = (T::write_access_ptr_fn(get_api()))(self.access);
            orig_ptr as *const Self::Target
        }
    }
}

// Write guards are only created through `TypedArray::write(&mut self)`, so mutation
// through this guard is exclusive for its lifetime.
unsafe impl<'a, T: Element> crate::access::WritePtr for WriteGuard<'a, T> {}
impl<'a, T: Element> Drop for WriteGuard<'a, T> {
    #[inline]
    fn drop(&mut self) {
        // Release the write access handle back to Godot.
        unsafe {
            (T::write_access_destroy_fn(get_api()))(self.access);
        }
    }
}
// The proc macros below generate the sealed `Element` trait and its implementations:
// each `impl_typed_array_element!` wires one element type to its pool-array kind
// (`byte`, `int`, `real`, `string`, `vector2`, `vector3`, `color`), supplying the
// sys-level type and reference form where they differ from the element type itself.
macros::decl_typed_array_element! {
    /// Trait for element types that can be contained in `TypedArray`. This trait is sealed
    /// and has no public interface.
    pub trait Element: private::Sealed { .. }
}

macros::impl_typed_array_element! {
    impl Element for u8 => byte { .. }
}

macros::impl_typed_array_element! {
    impl Element for i32 => int { .. }
}

macros::impl_typed_array_element! {
    impl Element for f32 => real { .. }
}

macros::impl_typed_array_element! {
    impl Element for GodotString
        as sys::godot_string
        ref *const sys::godot_string
        => string
    { .. }
}

macros::impl_typed_array_element! {
    impl Element for Vector2
        as sys::godot_vector2
        ref *const sys::godot_vector2
        => vector2
    { .. }
}

macros::impl_typed_array_element! {
    impl Element for Vector3
        as sys::godot_vector3
        ref *const sys::godot_vector3
        => vector3
    { .. }
}

macros::impl_typed_array_element! {
    impl Element for Color
        as sys::godot_color
        ref *const sys::godot_color
        => color
    { .. }
}

mod private {
    /// Seal for [`Element`]: the trait cannot be implemented outside this crate.
    pub trait Sealed {}
}
| 27.417537 | 94 | 0.569253 |
381904f06315af5424bb0c4f349cc843e2a02566
| 389 |
use anchor_lang::prelude::*;
/// Account holding one unsigned and one signed 128-bit value.
#[account]
pub struct Data {
    pub udata: u128,
    pub idata: i128,
}

/// Account holding a single `u16`.
#[account]
#[derive(Default)]
pub struct DataU16 {
    pub data: u16,
}

/// Account holding a single `i8`.
#[account]
pub struct DataI8 {
    pub data: i8,
}

/// Account holding a single `i16`.
#[account]
pub struct DataI16 {
    pub data: i16,
}

/// Zero-copy account holding a `u16` plus a stored `bump` byte
/// (presumably the PDA bump seed — confirm against the program's instructions).
#[account(zero_copy)]
#[derive(Default)]
pub struct DataZeroCopy {
    pub data: u16,
    pub bump: u8,
}
| 12.548387 | 28 | 0.627249 |
3961a5a6dc2e9db7360658adaaa3ec64771fa8cf
| 5,554 |
use std::ffi::CStr;
use crate::extn::core::integer::Integer;
use crate::extn::prelude::*;
const NUMERIC_CSTR: &CStr = cstr::cstr!("Numeric");
/// Defines the `Numeric` class on the interpreter and evals its Ruby counterpart
/// (`numeric.rb`). Idempotent: returns early if `Numeric` is already defined.
pub fn init(interp: &mut Artichoke) -> InitializeResult<()> {
    if interp.is_class_defined::<Numeric>() {
        return Ok(());
    }
    let spec = class::Spec::new("Numeric", NUMERIC_CSTR, None, None)?;
    interp.def_class::<Numeric>(spec)?;
    let _ = interp.eval(&include_bytes!("numeric.rb")[..])?;
    trace!("Patched Numeric onto interpreter");
    Ok(())
}
/// Marker type for the Ruby `Numeric` class registered in [`init`].
#[derive(Debug, Clone, Copy)]
pub struct Numeric;

/// Result of a numeric operation: either a float or an integer value.
#[derive(Debug, Clone, Copy, PartialEq, PartialOrd)]
pub enum Outcome {
    Float(Fp),
    Integer(Int),
    // TODO: Complex? Rational?
}
impl ConvertMut<Outcome, Value> for Artichoke {
    /// Converts an [`Outcome`] into an interpreter `Value` of the matching Ruby type.
    fn convert_mut(&mut self, from: Outcome) -> Value {
        match from {
            // Floats use the mutating conversion path; integers the plain one.
            Outcome::Float(num) => self.convert_mut(num),
            Outcome::Integer(num) => self.convert(num),
        }
    }
}
// Maximum recursion depth for user-defined `#coerce` chains before `coerce` raises
// `SystemStackError` ("stack level too deep").
const MAX_COERCE_DEPTH: u8 = 15;

/// A coerced pair of numeric values, ordered `(x, y)`.
#[derive(Debug, Clone, Copy, PartialEq, PartialOrd)]
pub enum Coercion {
    Float(Fp, Fp),
    Integer(Int, Int),
    // TODO: Complex? Rational?
}
/// If `y` is the same type as `x`, returns an array `[y, x]`. Otherwise,
/// returns an array with both `y` and `x` represented as `Float` objects.
///
/// This coercion mechanism is used by Ruby to handle mixed-type numeric
/// operations: it is intended to find a compatible common type between the two
/// operands of the operator.
///
/// See [`Numeric#coerce`][numeric].
///
/// # Coercion enum
///
/// Artichoke represents the `[y, x]` tuple Array as the [`Coercion`] enum, which
/// orders its values `Coercion::Integer(x, y)`.
///
/// # Examples
///
/// ```
/// # use artichoke_backend::prelude::*;
/// # use artichoke_backend::extn::core::numeric::{self, Coercion};
/// # fn example() -> Result<(), Box<dyn std::error::Error>> {
/// # let mut interp = artichoke_backend::interpreter()?;
/// let x = interp.convert(1_i64);
/// let y = interp.convert_mut(2.5_f64);
/// assert_eq!(Coercion::Float(1.0, 2.5), numeric::coerce(&mut interp, x, y)?);
/// let x = interp.convert_mut(1.2_f64);
/// let y = interp.convert(3_i64);
/// assert_eq!(Coercion::Float(1.2, 3.0), numeric::coerce(&mut interp, x, y)?);
/// let x = interp.convert(1_i64);
/// let y = interp.convert(2_i64);
/// assert_eq!(Coercion::Integer(1, 2), numeric::coerce(&mut interp, x, y)?);
/// # Ok(())
/// # }
/// # example().unwrap();
/// ```
///
/// [numeric]: https://ruby-doc.org/core-2.6.3/Numeric.html#method-i-coerce
pub fn coerce(interp: &mut Artichoke, x: Value, y: Value) -> Result<Coercion, Error> {
    // Recursive worker; `depth` guards against `#coerce` implementations that keep
    // returning non-primitive pairs and would otherwise recurse forever.
    fn do_coerce(interp: &mut Artichoke, x: Value, y: Value, depth: u8) -> Result<Coercion, Error> {
        if depth > MAX_COERCE_DEPTH {
            return Err(SystemStackError::with_message("stack level too deep").into());
        }
        match (x.ruby_type(), y.ruby_type()) {
            // Both operands primitive: coerce directly, without calling into Ruby.
            (Ruby::Float, Ruby::Float) => Ok(Coercion::Float(x.try_into(interp)?, y.try_into(interp)?)),
            (Ruby::Float, Ruby::Fixnum) => {
                let y = y.try_into::<Integer>(interp)?;
                Ok(Coercion::Float(x.try_into(interp)?, y.as_f64()))
            }
            (Ruby::Fixnum, Ruby::Float) => {
                let x = x.try_into::<Integer>(interp)?;
                Ok(Coercion::Float(x.as_f64(), y.try_into(interp)?))
            }
            (Ruby::Fixnum, Ruby::Fixnum) => Ok(Coercion::Integer(x.try_into(interp)?, y.try_into(interp)?)),
            _ => {
                // `y` is not a primitive: fall back to the Ruby protocol. A `Numeric`
                // that responds to `#coerce` must return a two-element array, whose
                // elements are then coerced recursively.
                let class_of_numeric = interp
                    .class_of::<Numeric>()?
                    .ok_or_else(|| NotDefinedError::class("Numeric"))?;
                let is_a_numeric = y.funcall(interp, "is_a?", &[class_of_numeric], None)?;
                let is_a_numeric = interp.try_convert(is_a_numeric);
                if let Ok(true) = is_a_numeric {
                    if y.respond_to(interp, "coerce")? {
                        let coerced = y.funcall(interp, "coerce", &[x], None)?;
                        let coerced: Vec<Value> = interp
                            .try_convert_mut(coerced)
                            .map_err(|_| TypeError::with_message("coerce must return [x, y]"))?;
                        let mut coerced = coerced.into_iter();
                        let y = coerced
                            .next()
                            .ok_or_else(|| TypeError::with_message("coerce must return [x, y]"))?;
                        let x = coerced
                            .next()
                            .ok_or_else(|| TypeError::with_message("coerce must return [x, y]"))?;
                        if coerced.next().is_some() {
                            // More than two elements is a protocol violation.
                            Err(TypeError::with_message("coerce must return [x, y]").into())
                        } else {
                            do_coerce(interp, x, y, depth + 1)
                        }
                    } else {
                        let mut message = String::from("can't convert ");
                        message.push_str(interp.inspect_type_name_for_value(y));
                        message.push_str(" into Float");
                        Err(TypeError::from(message).into())
                    }
                } else {
                    let mut message = String::from(interp.inspect_type_name_for_value(y));
                    message.push_str(" can't be coerced into Float");
                    Err(TypeError::from(message).into())
                }
            }
        }
    }
    do_coerce(interp, x, y, 0)
}
| 39.956835 | 108 | 0.539611 |
765113cfbb9e2ef8d639a1d63415b272077dc5c6
| 875 |
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// compile-pass

// Compiletest UI test: an associated-const initializer that indexes out of bounds for
// some instantiations must only *warn* (const_err), not abort compilation — hence the
// `compile-pass` directive with `//~ WARN` annotations below.

trait Foo {
    const AMT: usize;
}

enum Bar<A, B> {
    First(A),
    Second(B),
}

impl<A: Foo, B: Foo> Foo for Bar<A, B> {
    // `[A::AMT]` has length 1, so the index `(A::AMT > B::AMT) as usize` is out of
    // bounds exactly when `A::AMT > B::AMT` — the case instantiated in `main`.
    const AMT: usize = [A::AMT][(A::AMT > B::AMT) as usize];
}

impl Foo for u8 {
    const AMT: usize = 1;
}

impl Foo for u16 {
    const AMT: usize = 2;
}

fn main() {
    println!("{}", <Bar<u16, u8> as Foo>::AMT); //~ WARN const_err
    //~^ WARN const_err
}
| 23.026316 | 68 | 0.645714 |
91130081f2bcc8c2b67345e2010cc1cd9340de38
| 5,661 |
// Copyright 2020, The Tari Project
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
// following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
// disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
// following disclaimer in the documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
// products derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
// INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
use std::mem::size_of;
use chacha20::{
cipher::{NewCipher, StreamCipher},
ChaCha20,
Key,
Nonce,
};
use digest::Digest;
use rand::{rngs::OsRng, RngCore};
use tari_comms::types::{Challenge, CommsPublicKey};
use tari_crypto::{
keys::{DiffieHellmanSharedSecret, PublicKey},
tari_utilities::{epoch_time::EpochTime, ByteArray},
};
use crate::{
envelope::{DhtMessageFlags, DhtMessageHeader, DhtMessageType, NodeDestination},
outbound::DhtOutboundError,
version::DhtProtocolVersion,
};
/// Computes the Diffie-Hellman shared secret between `secret_key` and `public_key`.
///
/// Thin generic wrapper over [`DiffieHellmanSharedSecret::shared_secret`].
pub fn generate_ecdh_secret<PK>(secret_key: &PK::K, public_key: &PK) -> PK
where PK: PublicKey + DiffieHellmanSharedSecret<PK = PK> {
    PK::shared_secret(secret_key, public_key)
}
/// Decrypts `cipher_text` with ChaCha20, using the byte representation of `cipher_key`
/// as the symmetric key.
///
/// The leading `size_of::<Nonce>()` bytes of `cipher_text` are interpreted as the nonce
/// (the layout produced by [`encrypt`]); the remainder is decrypted and returned.
///
/// # Errors
///
/// Returns [`DhtOutboundError::CipherError`] if `cipher_text` is too short to contain
/// a nonce.
///
/// NOTE(review): a bare stream cipher provides confidentiality but no integrity —
/// presumably tampering is caught elsewhere (e.g. the origin MAC); confirm before
/// relying on this for authenticated payloads.
pub fn decrypt(cipher_key: &CommsPublicKey, cipher_text: &[u8]) -> Result<Vec<u8>, DhtOutboundError> {
    if cipher_text.len() < size_of::<Nonce>() {
        return Err(DhtOutboundError::CipherError(
            "Cipher text is not long enough to include nonce".to_string(),
        ));
    }

    let (nonce, cipher_text) = cipher_text.split_at(size_of::<Nonce>());
    let nonce = Nonce::from_slice(nonce);
    let mut cipher_text = cipher_text.to_vec();

    let key = Key::from_slice(cipher_key.as_bytes()); // 32-bytes
    let mut cipher = ChaCha20::new(key, nonce);
    // Applying the keystream over the ciphertext recovers the plain text in place.
    cipher.apply_keystream(cipher_text.as_mut_slice());
    Ok(cipher_text)
}
/// Encrypts `plain_text` with ChaCha20 under a key derived from `cipher_key`.
///
/// A fresh random nonce is generated per message and prepended to the
/// ciphertext, so the output layout is `nonce || ciphertext` — exactly what
/// [`decrypt`] expects.
pub fn encrypt(cipher_key: &CommsPublicKey, plain_text: &[u8]) -> Result<Vec<u8>, DhtOutboundError> {
    let mut nonce = [0u8; size_of::<Nonce>()];
    OsRng.fill_bytes(&mut nonce);
    let key = Key::from_slice(cipher_key.as_bytes()); // 32 bytes
    let mut cipher = ChaCha20::new(key, Nonce::from_slice(&nonce));
    // Encrypt a copy so the caller's buffer is left untouched and the nonce
    // can be prepended to the result.
    let mut body = plain_text.to_vec();
    cipher.apply_keystream(&mut body);
    let mut ciphertext_integral_nonce = Vec::with_capacity(nonce.len() + body.len());
    ciphertext_integral_nonce.extend_from_slice(&nonce);
    ciphertext_integral_nonce.append(&mut body);
    Ok(ciphertext_integral_nonce)
}
/// Builds the origin-MAC challenge for a complete [`DhtMessageHeader`] and
/// message body by forwarding the individual header fields to
/// [`create_origin_mac_challenge_parts`].
pub fn create_origin_mac_challenge(header: &DhtMessageHeader, body: &[u8]) -> Challenge {
    let ephemeral_public_key = header.ephemeral_public_key.as_ref();
    create_origin_mac_challenge_parts(
        header.version,
        &header.destination,
        &header.message_type,
        header.flags,
        header.expires,
        ephemeral_public_key,
        body,
    )
}
/// Builds the origin-MAC challenge from the individual header fields.
///
/// The digest input is, in order: the protocol version bytes, the
/// destination, the message type as a little-endian `i32`, the flag bits
/// (little-endian), the optional expiry timestamp, the optional ephemeral
/// public key, and finally the message body. The field order here must match
/// the order used on the verifying side.
pub fn create_origin_mac_challenge_parts(
    protocol_version: DhtProtocolVersion,
    destination: &NodeDestination,
    message_type: &DhtMessageType,
    flags: DhtMessageFlags,
    expires: Option<EpochTime>,
    ephemeral_public_key: Option<&CommsPublicKey>,
    body: &[u8],
) -> Challenge {
    let mut mac_challenge = Challenge::new();
    mac_challenge.update(&protocol_version.to_bytes());
    mac_challenge.update(destination.to_inner_bytes().as_slice());
    mac_challenge.update(&(*message_type as i32).to_le_bytes());
    mac_challenge.update(&flags.bits().to_le_bytes());
    if let Some(t) = expires {
        mac_challenge.update(&t.as_u64().to_le_bytes());
    }
    // `ephemeral_public_key` is already an `Option<&_>`; a further `.as_ref()`
    // only produced a needless `&&CommsPublicKey`.
    if let Some(e_pk) = ephemeral_public_key {
        mac_challenge.update(e_pk.as_bytes());
    }
    // `body` is already a byte slice; pass it directly (clippy::needless_borrow).
    mac_challenge.update(body);
    mac_challenge
}
#[cfg(test)]
mod test {
    use tari_utilities::hex::from_hex;
    use super::*;
    // Round-trip: encrypting then decrypting with the same key must return
    // the original plain text (nonce is random, so only the round trip is
    // deterministic).
    #[test]
    fn encrypt_decrypt() {
        let key = CommsPublicKey::default();
        let plain_text = "Last enemy position 0830h AJ 9863".as_bytes().to_vec();
        let encrypted = encrypt(&key, &plain_text).unwrap();
        let decrypted = decrypt(&key, &encrypted).unwrap();
        assert_eq!(decrypted, plain_text);
    }
    // Known-answer test: a fixed hex ciphertext (nonce prepended) must
    // decrypt to the expected message under the default key. Guards against
    // accidental changes to the key/nonce layout.
    #[test]
    fn decrypt_fn() {
        let key = CommsPublicKey::default();
        let cipher_text =
            from_hex("24bf9e698e14938e93c09e432274af7c143f8fb831f344f244ef02ca78a07ddc28b46fec536a0ca5c04737a604")
                .unwrap();
        let plain_text = decrypt(&key, &cipher_text).unwrap();
        let secret_msg = "Last enemy position 0830h AJ 9863".as_bytes().to_vec();
        assert_eq!(plain_text, secret_msg);
    }
}
| 37.74 | 119 | 0.708709 |
489b5ccded85a1fd0b3d6f4d7294ec2a21ab7aae
| 724 |
//! Generated file, do not edit by hand, see `xtask/codegen`
use crate::generated::FormatJsAnyImportClause;
use crate::prelude::*;
use rome_js_syntax::JsAnyImportClause;
impl FormatRule<JsAnyImportClause> for FormatJsAnyImportClause {
type Context = JsFormatContext;
fn fmt(node: &JsAnyImportClause, f: &mut JsFormatter) -> FormatResult<()> {
match node {
JsAnyImportClause::JsImportBareClause(node) => node.format().fmt(f),
JsAnyImportClause::JsImportNamedClause(node) => node.format().fmt(f),
JsAnyImportClause::JsImportDefaultClause(node) => node.format().fmt(f),
JsAnyImportClause::JsImportNamespaceClause(node) => node.format().fmt(f),
}
}
}
| 42.588235 | 85 | 0.687845 |
28f10e457ac4ddc8fbcba4386059ac3600a04e34
| 99,043 |
// Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
// Shared state behind every `Client`: the underlying Smithy service client
// plus the service configuration. Wrapped in an `Arc` by `Client` so clones
// are cheap.
#[derive(Debug)]
pub(crate) struct Handle<
    C = aws_smithy_client::erase::DynConnector,
    M = crate::middleware::DefaultMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Low-level client that dispatches fully-built operations.
    pub(crate) client: aws_smithy_client::Client<C, M, R>,
    // Service configuration; consulted via `make_operation(&handle.conf)`
    // when requests are built.
    pub(crate) conf: crate::Config,
}
/// Client for Migration Hub Strategy Recommendations
///
/// Client for invoking operations on Migration Hub Strategy Recommendations. Each operation on Migration Hub Strategy Recommendations is a method on
/// this struct. `.send()` MUST be invoked on the generated operations to dispatch the request to the service.
///
/// # Examples
/// **Constructing a client and invoking an operation**
/// ```rust,no_run
/// # async fn docs() {
/// // create a shared configuration. This can be used & shared between multiple service clients.
/// let shared_config = aws_config::load_from_env().await;
/// let client = aws_sdk_migrationhubstrategy::Client::new(&shared_config);
/// // invoke an operation
/// /* let rsp = client
///     .<operation_name>().
///     .<param>("some value")
///     .send().await; */
/// # }
/// ```
/// **Constructing a client with custom configuration**
/// ```rust,no_run
/// use aws_config::RetryConfig;
/// # async fn docs() {
/// let shared_config = aws_config::load_from_env().await;
/// let config = aws_sdk_migrationhubstrategy::config::Builder::from(&shared_config)
///     .retry_config(RetryConfig::disabled())
///     .build();
/// let client = aws_sdk_migrationhubstrategy::Client::from_conf(config);
/// # }
/// ```
#[derive(std::fmt::Debug)]
pub struct Client<
    C = aws_smithy_client::erase::DynConnector,
    M = crate::middleware::DefaultMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // All clients created from this one share this handle (see `Clone`).
    handle: std::sync::Arc<Handle<C, M, R>>,
}
impl<C, M, R> std::clone::Clone for Client<C, M, R> {
fn clone(&self) -> Self {
Self {
handle: self.handle.clone(),
}
}
}
#[doc(inline)]
pub use aws_smithy_client::Builder;
impl<C, M, R> From<aws_smithy_client::Client<C, M, R>> for Client<C, M, R> {
    /// Wraps a raw Smithy client with a default-built service configuration.
    fn from(client: aws_smithy_client::Client<C, M, R>) -> Self {
        let conf = crate::Config::builder().build();
        Self::with_config(client, conf)
    }
}
impl<C, M, R> Client<C, M, R> {
    /// Creates a client from a raw Smithy client and a service configuration.
    pub fn with_config(client: aws_smithy_client::Client<C, M, R>, conf: crate::Config) -> Self {
        let handle = Handle { client, conf };
        Self {
            handle: std::sync::Arc::new(handle),
        }
    }

    /// Returns a reference to this client's configuration.
    pub fn conf(&self) -> &crate::Config {
        &self.handle.conf
    }
}
impl<C, M, R> Client<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Constructs a fluent builder for the `GetApplicationComponentDetails` operation.
    /// See [`GetApplicationComponentDetails`](crate::client::fluent_builders::GetApplicationComponentDetails) for details.
    pub fn get_application_component_details(
        &self,
    ) -> fluent_builders::GetApplicationComponentDetails<C, M, R> {
        fluent_builders::GetApplicationComponentDetails::new(std::sync::Arc::clone(&self.handle))
    }
    /// Constructs a fluent builder for the `GetApplicationComponentStrategies` operation.
    /// See [`GetApplicationComponentStrategies`](crate::client::fluent_builders::GetApplicationComponentStrategies) for details.
    pub fn get_application_component_strategies(
        &self,
    ) -> fluent_builders::GetApplicationComponentStrategies<C, M, R> {
        fluent_builders::GetApplicationComponentStrategies::new(std::sync::Arc::clone(&self.handle))
    }
    /// Constructs a fluent builder for the `GetAssessment` operation.
    /// See [`GetAssessment`](crate::client::fluent_builders::GetAssessment) for details.
    pub fn get_assessment(&self) -> fluent_builders::GetAssessment<C, M, R> {
        fluent_builders::GetAssessment::new(std::sync::Arc::clone(&self.handle))
    }
    /// Constructs a fluent builder for the `GetImportFileTask` operation.
    /// See [`GetImportFileTask`](crate::client::fluent_builders::GetImportFileTask) for details.
    pub fn get_import_file_task(&self) -> fluent_builders::GetImportFileTask<C, M, R> {
        fluent_builders::GetImportFileTask::new(std::sync::Arc::clone(&self.handle))
    }
    /// Constructs a fluent builder for the `GetPortfolioPreferences` operation.
    /// See [`GetPortfolioPreferences`](crate::client::fluent_builders::GetPortfolioPreferences) for details.
    pub fn get_portfolio_preferences(&self) -> fluent_builders::GetPortfolioPreferences<C, M, R> {
        fluent_builders::GetPortfolioPreferences::new(std::sync::Arc::clone(&self.handle))
    }
    /// Constructs a fluent builder for the `GetPortfolioSummary` operation.
    /// See [`GetPortfolioSummary`](crate::client::fluent_builders::GetPortfolioSummary) for details.
    pub fn get_portfolio_summary(&self) -> fluent_builders::GetPortfolioSummary<C, M, R> {
        fluent_builders::GetPortfolioSummary::new(std::sync::Arc::clone(&self.handle))
    }
    /// Constructs a fluent builder for the `GetRecommendationReportDetails` operation.
    /// See [`GetRecommendationReportDetails`](crate::client::fluent_builders::GetRecommendationReportDetails) for details.
    pub fn get_recommendation_report_details(
        &self,
    ) -> fluent_builders::GetRecommendationReportDetails<C, M, R> {
        fluent_builders::GetRecommendationReportDetails::new(std::sync::Arc::clone(&self.handle))
    }
    /// Constructs a fluent builder for the `GetServerDetails` operation.
    /// See [`GetServerDetails`](crate::client::fluent_builders::GetServerDetails) for details.
    /// This operation supports pagination; see [`into_paginator()`](crate::client::fluent_builders::GetServerDetails::into_paginator).
    pub fn get_server_details(&self) -> fluent_builders::GetServerDetails<C, M, R> {
        fluent_builders::GetServerDetails::new(std::sync::Arc::clone(&self.handle))
    }
    /// Constructs a fluent builder for the `GetServerStrategies` operation.
    /// See [`GetServerStrategies`](crate::client::fluent_builders::GetServerStrategies) for details.
    pub fn get_server_strategies(&self) -> fluent_builders::GetServerStrategies<C, M, R> {
        fluent_builders::GetServerStrategies::new(std::sync::Arc::clone(&self.handle))
    }
    /// Constructs a fluent builder for the `ListApplicationComponents` operation.
    /// See [`ListApplicationComponents`](crate::client::fluent_builders::ListApplicationComponents) for details.
    /// This operation supports pagination; see [`into_paginator()`](crate::client::fluent_builders::ListApplicationComponents::into_paginator).
    pub fn list_application_components(
        &self,
    ) -> fluent_builders::ListApplicationComponents<C, M, R> {
        fluent_builders::ListApplicationComponents::new(std::sync::Arc::clone(&self.handle))
    }
    /// Constructs a fluent builder for the `ListCollectors` operation.
    /// See [`ListCollectors`](crate::client::fluent_builders::ListCollectors) for details.
    /// This operation supports pagination; see [`into_paginator()`](crate::client::fluent_builders::ListCollectors::into_paginator).
    pub fn list_collectors(&self) -> fluent_builders::ListCollectors<C, M, R> {
        fluent_builders::ListCollectors::new(std::sync::Arc::clone(&self.handle))
    }
    /// Constructs a fluent builder for the `ListImportFileTask` operation.
    /// See [`ListImportFileTask`](crate::client::fluent_builders::ListImportFileTask) for details.
    /// This operation supports pagination; see [`into_paginator()`](crate::client::fluent_builders::ListImportFileTask::into_paginator).
    pub fn list_import_file_task(&self) -> fluent_builders::ListImportFileTask<C, M, R> {
        fluent_builders::ListImportFileTask::new(std::sync::Arc::clone(&self.handle))
    }
    /// Constructs a fluent builder for the `ListServers` operation.
    /// See [`ListServers`](crate::client::fluent_builders::ListServers) for details.
    /// This operation supports pagination; see [`into_paginator()`](crate::client::fluent_builders::ListServers::into_paginator).
    pub fn list_servers(&self) -> fluent_builders::ListServers<C, M, R> {
        fluent_builders::ListServers::new(std::sync::Arc::clone(&self.handle))
    }
    /// Constructs a fluent builder for the `PutPortfolioPreferences` operation.
    /// See [`PutPortfolioPreferences`](crate::client::fluent_builders::PutPortfolioPreferences) for details.
    pub fn put_portfolio_preferences(&self) -> fluent_builders::PutPortfolioPreferences<C, M, R> {
        fluent_builders::PutPortfolioPreferences::new(std::sync::Arc::clone(&self.handle))
    }
    /// Constructs a fluent builder for the `StartAssessment` operation.
    /// See [`StartAssessment`](crate::client::fluent_builders::StartAssessment) for details.
    pub fn start_assessment(&self) -> fluent_builders::StartAssessment<C, M, R> {
        fluent_builders::StartAssessment::new(std::sync::Arc::clone(&self.handle))
    }
    /// Constructs a fluent builder for the `StartImportFileTask` operation.
    /// See [`StartImportFileTask`](crate::client::fluent_builders::StartImportFileTask) for details.
    pub fn start_import_file_task(&self) -> fluent_builders::StartImportFileTask<C, M, R> {
        fluent_builders::StartImportFileTask::new(std::sync::Arc::clone(&self.handle))
    }
    /// Constructs a fluent builder for the `StartRecommendationReportGeneration` operation.
    /// See [`StartRecommendationReportGeneration`](crate::client::fluent_builders::StartRecommendationReportGeneration) for details.
    pub fn start_recommendation_report_generation(
        &self,
    ) -> fluent_builders::StartRecommendationReportGeneration<C, M, R> {
        fluent_builders::StartRecommendationReportGeneration::new(std::sync::Arc::clone(
            &self.handle,
        ))
    }
    /// Constructs a fluent builder for the `StopAssessment` operation.
    /// See [`StopAssessment`](crate::client::fluent_builders::StopAssessment) for details.
    pub fn stop_assessment(&self) -> fluent_builders::StopAssessment<C, M, R> {
        fluent_builders::StopAssessment::new(std::sync::Arc::clone(&self.handle))
    }
    /// Constructs a fluent builder for the `UpdateApplicationComponentConfig` operation.
    /// See [`UpdateApplicationComponentConfig`](crate::client::fluent_builders::UpdateApplicationComponentConfig) for details.
    pub fn update_application_component_config(
        &self,
    ) -> fluent_builders::UpdateApplicationComponentConfig<C, M, R> {
        fluent_builders::UpdateApplicationComponentConfig::new(std::sync::Arc::clone(&self.handle))
    }
    /// Constructs a fluent builder for the `UpdateServerConfig` operation.
    /// See [`UpdateServerConfig`](crate::client::fluent_builders::UpdateServerConfig) for details.
    pub fn update_server_config(&self) -> fluent_builders::UpdateServerConfig<C, M, R> {
        fluent_builders::UpdateServerConfig::new(std::sync::Arc::clone(&self.handle))
    }
}
pub mod fluent_builders {
//!
//! Utilities to ergonomically construct a request to the service.
//!
//! Fluent builders are created through the [`Client`](crate::client::Client) by calling
//! one if its operation methods. After parameters are set using the builder methods,
//! the `send` method can be called to initiate the request.
//!
/// Fluent builder constructing a request to `GetApplicationComponentDetails`.
///
/// <p> Retrieves details about an application component. </p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct GetApplicationComponentDetails<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::get_application_component_details_input::Builder,
}
impl<C, M, R> GetApplicationComponentDetails<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `GetApplicationComponentDetails`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::GetApplicationComponentDetailsOutput,
aws_smithy_http::result::SdkError<crate::error::GetApplicationComponentDetailsError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::GetApplicationComponentDetailsInputOperationOutputAlias,
crate::output::GetApplicationComponentDetailsOutput,
crate::error::GetApplicationComponentDetailsError,
crate::input::GetApplicationComponentDetailsInputOperationRetryAlias,
>,
{
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p> The ID of the application component. The ID is unique within an AWS account.</p>
pub fn application_component_id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.application_component_id(input.into());
self
}
/// <p> The ID of the application component. The ID is unique within an AWS account.</p>
pub fn set_application_component_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_application_component_id(input);
self
}
}
/// Fluent builder constructing a request to `GetApplicationComponentStrategies`.
///
/// <p> Retrieves a list of all the recommended strategies and tools for an application component running on a server. </p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct GetApplicationComponentStrategies<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::get_application_component_strategies_input::Builder,
}
impl<C, M, R> GetApplicationComponentStrategies<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `GetApplicationComponentStrategies`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::GetApplicationComponentStrategiesOutput,
aws_smithy_http::result::SdkError<crate::error::GetApplicationComponentStrategiesError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::GetApplicationComponentStrategiesInputOperationOutputAlias,
crate::output::GetApplicationComponentStrategiesOutput,
crate::error::GetApplicationComponentStrategiesError,
crate::input::GetApplicationComponentStrategiesInputOperationRetryAlias,
>,
{
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p> The ID of the application component. The ID is unique within an AWS account.</p>
pub fn application_component_id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.application_component_id(input.into());
self
}
/// <p> The ID of the application component. The ID is unique within an AWS account.</p>
pub fn set_application_component_id(
mut self,
input: std::option::Option<std::string::String>,
) -> Self {
self.inner = self.inner.set_application_component_id(input);
self
}
}
/// Fluent builder constructing a request to `GetAssessment`.
///
/// <p> Retrieves the status of an on-going assessment. </p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct GetAssessment<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::get_assessment_input::Builder,
}
impl<C, M, R> GetAssessment<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `GetAssessment`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::GetAssessmentOutput,
aws_smithy_http::result::SdkError<crate::error::GetAssessmentError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::GetAssessmentInputOperationOutputAlias,
crate::output::GetAssessmentOutput,
crate::error::GetAssessmentError,
crate::input::GetAssessmentInputOperationRetryAlias,
>,
{
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p> The <code>assessmentid</code> returned by <code>StartAssessment</code>.</p>
pub fn id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.id(input.into());
self
}
/// <p> The <code>assessmentid</code> returned by <code>StartAssessment</code>.</p>
pub fn set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_id(input);
self
}
}
/// Fluent builder constructing a request to `GetImportFileTask`.
///
/// <p> Retrieves the details about a specific import task. </p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct GetImportFileTask<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::get_import_file_task_input::Builder,
}
impl<C, M, R> GetImportFileTask<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `GetImportFileTask`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::GetImportFileTaskOutput,
aws_smithy_http::result::SdkError<crate::error::GetImportFileTaskError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::GetImportFileTaskInputOperationOutputAlias,
crate::output::GetImportFileTaskOutput,
crate::error::GetImportFileTaskError,
crate::input::GetImportFileTaskInputOperationRetryAlias,
>,
{
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p> The ID of the import file task. This ID is returned in the response of <code>StartImportFileTask</code>. </p>
pub fn id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.id(input.into());
self
}
/// <p> The ID of the import file task. This ID is returned in the response of <code>StartImportFileTask</code>. </p>
pub fn set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_id(input);
self
}
}
/// Fluent builder constructing a request to `GetPortfolioPreferences`.
///
/// <p> Retrieves your migration and modernization preferences. </p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct GetPortfolioPreferences<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::get_portfolio_preferences_input::Builder,
}
impl<C, M, R> GetPortfolioPreferences<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `GetPortfolioPreferences`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::GetPortfolioPreferencesOutput,
aws_smithy_http::result::SdkError<crate::error::GetPortfolioPreferencesError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::GetPortfolioPreferencesInputOperationOutputAlias,
crate::output::GetPortfolioPreferencesOutput,
crate::error::GetPortfolioPreferencesError,
crate::input::GetPortfolioPreferencesInputOperationRetryAlias,
>,
{
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
}
/// Fluent builder constructing a request to `GetPortfolioSummary`.
///
/// <p> Retrieves overall summary including the number of servers to rehost and the overall number of anti-patterns. </p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct GetPortfolioSummary<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::get_portfolio_summary_input::Builder,
}
impl<C, M, R> GetPortfolioSummary<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `GetPortfolioSummary`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::GetPortfolioSummaryOutput,
aws_smithy_http::result::SdkError<crate::error::GetPortfolioSummaryError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::GetPortfolioSummaryInputOperationOutputAlias,
crate::output::GetPortfolioSummaryOutput,
crate::error::GetPortfolioSummaryError,
crate::input::GetPortfolioSummaryInputOperationRetryAlias,
>,
{
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
}
/// Fluent builder constructing a request to `GetRecommendationReportDetails`.
///
/// <p> Retrieves detailed information about the specified recommendation report. </p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct GetRecommendationReportDetails<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::get_recommendation_report_details_input::Builder,
}
impl<C, M, R> GetRecommendationReportDetails<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `GetRecommendationReportDetails`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::GetRecommendationReportDetailsOutput,
aws_smithy_http::result::SdkError<crate::error::GetRecommendationReportDetailsError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::GetRecommendationReportDetailsInputOperationOutputAlias,
crate::output::GetRecommendationReportDetailsOutput,
crate::error::GetRecommendationReportDetailsError,
crate::input::GetRecommendationReportDetailsInputOperationRetryAlias,
>,
{
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// <p> The recommendation report generation task <code>id</code> returned by <code>StartRecommendationReportGeneration</code>. </p>
pub fn id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.id(input.into());
self
}
/// <p> The recommendation report generation task <code>id</code> returned by <code>StartRecommendationReportGeneration</code>. </p>
pub fn set_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_id(input);
self
}
}
    /// Fluent builder constructing a request to `GetServerDetails`.
    ///
    /// <p> Retrieves detailed information about a specified server. </p>
    #[derive(std::clone::Clone, std::fmt::Debug)]
    pub struct GetServerDetails<
        C = aws_smithy_client::erase::DynConnector,
        M = crate::middleware::DefaultMiddleware,
        R = aws_smithy_client::retry::Standard,
    > {
        // Shared client handle; also consumed by `into_paginator()`.
        handle: std::sync::Arc<super::Handle<C, M, R>>,
        // Accumulates request parameters set via the fluent setters.
        inner: crate::input::get_server_details_input::Builder,
    }
impl<C, M, R> GetServerDetails<C, M, R>
where
C: aws_smithy_client::bounds::SmithyConnector,
M: aws_smithy_client::bounds::SmithyMiddleware<C>,
R: aws_smithy_client::retry::NewRequestPolicy,
{
/// Creates a new `GetServerDetails`.
pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
Self {
handle,
inner: Default::default(),
}
}
/// Sends the request and returns the response.
///
/// If an error occurs, an `SdkError` will be returned with additional details that
/// can be matched against.
///
/// By default, any retryable failures will be retried twice. Retry behavior
/// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
/// set when configuring the client.
pub async fn send(
self,
) -> std::result::Result<
crate::output::GetServerDetailsOutput,
aws_smithy_http::result::SdkError<crate::error::GetServerDetailsError>,
>
where
R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
crate::input::GetServerDetailsInputOperationOutputAlias,
crate::output::GetServerDetailsOutput,
crate::error::GetServerDetailsError,
crate::input::GetServerDetailsInputOperationRetryAlias,
>,
{
let op = self
.inner
.build()
.map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
.make_operation(&self.handle.conf)
.await
.map_err(|err| {
aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
})?;
self.handle.client.call(op).await
}
/// Create a paginator for this request
///
/// Paginators are used by calling [`send().await`](crate::paginator::GetServerDetailsPaginator::send) which returns a [`Stream`](tokio_stream::Stream).
pub fn into_paginator(self) -> crate::paginator::GetServerDetailsPaginator<C, M, R> {
crate::paginator::GetServerDetailsPaginator::new(self.handle, self.inner)
}
/// <p> The ID of the server. </p>
pub fn server_id(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.server_id(input.into());
self
}
/// <p> The ID of the server. </p>
pub fn set_server_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_server_id(input);
self
}
/// <p> The token from a previous call that you use to retrieve the next set of results. For example, if a previous call to this action returned 100 items, but you set <code>maxResults</code> to 10. You'll receive a set of 10 results along with a token. You then use the returned token to retrieve the next set of 10. </p>
pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
self.inner = self.inner.next_token(input.into());
self
}
/// <p> The token from a previous call that you use to retrieve the next set of results. For example, if a previous call to this action returned 100 items, but you set <code>maxResults</code> to 10. You'll receive a set of 10 results along with a token. You then use the returned token to retrieve the next set of 10. </p>
pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
self.inner = self.inner.set_next_token(input);
self
}
/// <p> The maximum number of items to include in the response. The maximum value is 100. </p>
pub fn max_results(mut self, input: i32) -> Self {
self.inner = self.inner.max_results(input);
self
}
/// <p> The maximum number of items to include in the response. The maximum value is 100. </p>
pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
self.inner = self.inner.set_max_results(input);
self
}
}
/// Fluent builder constructing a request to `GetServerStrategies`.
///
/// <p> Retrieves recommended strategies and tools for the specified server. </p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct GetServerStrategies<
    C = aws_smithy_client::erase::DynConnector,
    M = crate::middleware::DefaultMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state: connector, middleware, retry policy and config.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates request parameters set through the fluent methods below.
    inner: crate::input::get_server_strategies_input::Builder,
}
impl<C, M, R> GetServerStrategies<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `GetServerStrategies`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        let inner = crate::input::get_server_strategies_input::Builder::default();
        Self { handle, inner }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::GetServerStrategiesOutput,
        aws_smithy_http::result::SdkError<crate::error::GetServerStrategiesError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::GetServerStrategiesInputOperationOutputAlias,
            crate::output::GetServerStrategiesOutput,
            crate::error::GetServerStrategiesError,
            crate::input::GetServerStrategiesInputOperationRetryAlias,
        >,
    {
        // Finalize the accumulated input; a build failure surfaces as a
        // construction error rather than a service error.
        let input = self
            .inner
            .build()
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        // Convert the input into a dispatchable operation using the client config.
        let op = input
            .make_operation(&self.handle.conf)
            .await
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        self.handle.client.call(op).await
    }
    /// <p> The ID of the server. </p>
    pub fn server_id(mut self, input: impl Into<std::string::String>) -> Self {
        let next = self.inner.server_id(input.into());
        self.inner = next;
        self
    }
    /// <p> The ID of the server. </p>
    pub fn set_server_id(mut self, input: std::option::Option<std::string::String>) -> Self {
        let next = self.inner.set_server_id(input);
        self.inner = next;
        self
    }
}
/// Fluent builder constructing a request to `ListApplicationComponents`.
///
/// <p> Retrieves a list of all the application components (processes). </p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct ListApplicationComponents<
    C = aws_smithy_client::erase::DynConnector,
    M = crate::middleware::DefaultMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state: connector, middleware, retry policy and config.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates request parameters set through the fluent methods below.
    inner: crate::input::list_application_components_input::Builder,
}
impl<C, M, R> ListApplicationComponents<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `ListApplicationComponents`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        let inner = crate::input::list_application_components_input::Builder::default();
        Self { handle, inner }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::ListApplicationComponentsOutput,
        aws_smithy_http::result::SdkError<crate::error::ListApplicationComponentsError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::ListApplicationComponentsInputOperationOutputAlias,
            crate::output::ListApplicationComponentsOutput,
            crate::error::ListApplicationComponentsError,
            crate::input::ListApplicationComponentsInputOperationRetryAlias,
        >,
    {
        // Finalize the accumulated input; a build failure surfaces as a
        // construction error rather than a service error.
        let input = self
            .inner
            .build()
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        // Convert the input into a dispatchable operation using the client config.
        let op = input
            .make_operation(&self.handle.conf)
            .await
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        self.handle.client.call(op).await
    }
    /// Create a paginator for this request
    ///
    /// Paginators are used by calling [`send().await`](crate::paginator::ListApplicationComponentsPaginator::send) which returns a [`Stream`](tokio_stream::Stream).
    pub fn into_paginator(
        self,
    ) -> crate::paginator::ListApplicationComponentsPaginator<C, M, R> {
        let Self { handle, inner } = self;
        crate::paginator::ListApplicationComponentsPaginator::new(handle, inner)
    }
    /// <p> Criteria for filtering the list of application components. </p>
    pub fn application_component_criteria(
        mut self,
        input: crate::model::ApplicationComponentCriteria,
    ) -> Self {
        let next = self.inner.application_component_criteria(input);
        self.inner = next;
        self
    }
    /// <p> Criteria for filtering the list of application components. </p>
    pub fn set_application_component_criteria(
        mut self,
        input: std::option::Option<crate::model::ApplicationComponentCriteria>,
    ) -> Self {
        let next = self.inner.set_application_component_criteria(input);
        self.inner = next;
        self
    }
    /// <p> Specify the value based on the application component criteria type. For example, if <code>applicationComponentCriteria</code> is set to <code>SERVER_ID</code> and <code>filterValue</code> is set to <code>server1</code>, then <code>ListApplicationComponents</code> returns all the application components running on server1. </p>
    pub fn filter_value(mut self, input: impl Into<std::string::String>) -> Self {
        let next = self.inner.filter_value(input.into());
        self.inner = next;
        self
    }
    /// <p> Specify the value based on the application component criteria type. For example, if <code>applicationComponentCriteria</code> is set to <code>SERVER_ID</code> and <code>filterValue</code> is set to <code>server1</code>, then <code>ListApplicationComponents</code> returns all the application components running on server1. </p>
    pub fn set_filter_value(mut self, input: std::option::Option<std::string::String>) -> Self {
        let next = self.inner.set_filter_value(input);
        self.inner = next;
        self
    }
    /// <p> Specifies whether to sort by ascending (<code>ASC</code>) or descending (<code>DESC</code>) order. </p>
    pub fn sort(mut self, input: crate::model::SortOrder) -> Self {
        let next = self.inner.sort(input);
        self.inner = next;
        self
    }
    /// <p> Specifies whether to sort by ascending (<code>ASC</code>) or descending (<code>DESC</code>) order. </p>
    pub fn set_sort(mut self, input: std::option::Option<crate::model::SortOrder>) -> Self {
        let next = self.inner.set_sort(input);
        self.inner = next;
        self
    }
    /// Appends an item to `groupIdFilter`.
    ///
    /// To override the contents of this collection use [`set_group_id_filter`](Self::set_group_id_filter).
    ///
    /// <p> The group ID specified in to filter on. </p>
    pub fn group_id_filter(mut self, input: crate::model::Group) -> Self {
        let next = self.inner.group_id_filter(input);
        self.inner = next;
        self
    }
    /// <p> The group ID specified in to filter on. </p>
    pub fn set_group_id_filter(
        mut self,
        input: std::option::Option<std::vec::Vec<crate::model::Group>>,
    ) -> Self {
        let next = self.inner.set_group_id_filter(input);
        self.inner = next;
        self
    }
    /// <p> The token from a previous call that you use to retrieve the next set of results. For example, if a previous call to this action returned 100 items, but you set <code>maxResults</code> to 10. You'll receive a set of 10 results along with a token. You then use the returned token to retrieve the next set of 10. </p>
    pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
        let next = self.inner.next_token(input.into());
        self.inner = next;
        self
    }
    /// <p> The token from a previous call that you use to retrieve the next set of results. For example, if a previous call to this action returned 100 items, but you set <code>maxResults</code> to 10. You'll receive a set of 10 results along with a token. You then use the returned token to retrieve the next set of 10. </p>
    pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
        let next = self.inner.set_next_token(input);
        self.inner = next;
        self
    }
    /// <p> The maximum number of items to include in the response. The maximum value is 100. </p>
    pub fn max_results(mut self, input: i32) -> Self {
        let next = self.inner.max_results(input);
        self.inner = next;
        self
    }
    /// <p> The maximum number of items to include in the response. The maximum value is 100. </p>
    pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
        let next = self.inner.set_max_results(input);
        self.inner = next;
        self
    }
}
/// Fluent builder constructing a request to `ListCollectors`.
///
/// <p> Retrieves a list of all the installed collectors. </p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct ListCollectors<
    C = aws_smithy_client::erase::DynConnector,
    M = crate::middleware::DefaultMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state: connector, middleware, retry policy and config.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates request parameters set through the fluent methods below.
    inner: crate::input::list_collectors_input::Builder,
}
impl<C, M, R> ListCollectors<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `ListCollectors`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        let inner = crate::input::list_collectors_input::Builder::default();
        Self { handle, inner }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::ListCollectorsOutput,
        aws_smithy_http::result::SdkError<crate::error::ListCollectorsError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::ListCollectorsInputOperationOutputAlias,
            crate::output::ListCollectorsOutput,
            crate::error::ListCollectorsError,
            crate::input::ListCollectorsInputOperationRetryAlias,
        >,
    {
        // Finalize the accumulated input; a build failure surfaces as a
        // construction error rather than a service error.
        let input = self
            .inner
            .build()
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        // Convert the input into a dispatchable operation using the client config.
        let op = input
            .make_operation(&self.handle.conf)
            .await
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        self.handle.client.call(op).await
    }
    /// Create a paginator for this request
    ///
    /// Paginators are used by calling [`send().await`](crate::paginator::ListCollectorsPaginator::send) which returns a [`Stream`](tokio_stream::Stream).
    pub fn into_paginator(self) -> crate::paginator::ListCollectorsPaginator<C, M, R> {
        let Self { handle, inner } = self;
        crate::paginator::ListCollectorsPaginator::new(handle, inner)
    }
    /// <p> The token from a previous call that you use to retrieve the next set of results. For example, if a previous call to this action returned 100 items, but you set <code>maxResults</code> to 10. You'll receive a set of 10 results along with a token. You then use the returned token to retrieve the next set of 10. </p>
    pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
        let next = self.inner.next_token(input.into());
        self.inner = next;
        self
    }
    /// <p> The token from a previous call that you use to retrieve the next set of results. For example, if a previous call to this action returned 100 items, but you set <code>maxResults</code> to 10. You'll receive a set of 10 results along with a token. You then use the returned token to retrieve the next set of 10. </p>
    pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
        let next = self.inner.set_next_token(input);
        self.inner = next;
        self
    }
    /// <p> The maximum number of items to include in the response. The maximum value is 100. </p>
    pub fn max_results(mut self, input: i32) -> Self {
        let next = self.inner.max_results(input);
        self.inner = next;
        self
    }
    /// <p> The maximum number of items to include in the response. The maximum value is 100. </p>
    pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
        let next = self.inner.set_max_results(input);
        self.inner = next;
        self
    }
}
/// Fluent builder constructing a request to `ListImportFileTask`.
///
/// <p> Retrieves a list of all the imports performed. </p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct ListImportFileTask<
    C = aws_smithy_client::erase::DynConnector,
    M = crate::middleware::DefaultMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state: connector, middleware, retry policy and config.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates request parameters set through the fluent methods below.
    inner: crate::input::list_import_file_task_input::Builder,
}
impl<C, M, R> ListImportFileTask<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `ListImportFileTask`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        let inner = crate::input::list_import_file_task_input::Builder::default();
        Self { handle, inner }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::ListImportFileTaskOutput,
        aws_smithy_http::result::SdkError<crate::error::ListImportFileTaskError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::ListImportFileTaskInputOperationOutputAlias,
            crate::output::ListImportFileTaskOutput,
            crate::error::ListImportFileTaskError,
            crate::input::ListImportFileTaskInputOperationRetryAlias,
        >,
    {
        // Finalize the accumulated input; a build failure surfaces as a
        // construction error rather than a service error.
        let input = self
            .inner
            .build()
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        // Convert the input into a dispatchable operation using the client config.
        let op = input
            .make_operation(&self.handle.conf)
            .await
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        self.handle.client.call(op).await
    }
    /// Create a paginator for this request
    ///
    /// Paginators are used by calling [`send().await`](crate::paginator::ListImportFileTaskPaginator::send) which returns a [`Stream`](tokio_stream::Stream).
    pub fn into_paginator(self) -> crate::paginator::ListImportFileTaskPaginator<C, M, R> {
        let Self { handle, inner } = self;
        crate::paginator::ListImportFileTaskPaginator::new(handle, inner)
    }
    /// <p> The token from a previous call that you use to retrieve the next set of results. For example, if a previous call to this action returned 100 items, but you set <code>maxResults</code> to 10. You'll receive a set of 10 results along with a token. You then use the returned token to retrieve the next set of 10. </p>
    pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
        let next = self.inner.next_token(input.into());
        self.inner = next;
        self
    }
    /// <p> The token from a previous call that you use to retrieve the next set of results. For example, if a previous call to this action returned 100 items, but you set <code>maxResults</code> to 10. You'll receive a set of 10 results along with a token. You then use the returned token to retrieve the next set of 10. </p>
    pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
        let next = self.inner.set_next_token(input);
        self.inner = next;
        self
    }
    /// <p> The total number of items to return. The maximum value is 100. </p>
    pub fn max_results(mut self, input: i32) -> Self {
        let next = self.inner.max_results(input);
        self.inner = next;
        self
    }
    /// <p> The total number of items to return. The maximum value is 100. </p>
    pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
        let next = self.inner.set_max_results(input);
        self.inner = next;
        self
    }
}
/// Fluent builder constructing a request to `ListServers`.
///
/// <p> Returns a list of all the servers. </p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct ListServers<
    C = aws_smithy_client::erase::DynConnector,
    M = crate::middleware::DefaultMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state: connector, middleware, retry policy and config.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates request parameters set through the fluent methods below.
    inner: crate::input::list_servers_input::Builder,
}
impl<C, M, R> ListServers<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `ListServers`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        let inner = crate::input::list_servers_input::Builder::default();
        Self { handle, inner }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::ListServersOutput,
        aws_smithy_http::result::SdkError<crate::error::ListServersError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::ListServersInputOperationOutputAlias,
            crate::output::ListServersOutput,
            crate::error::ListServersError,
            crate::input::ListServersInputOperationRetryAlias,
        >,
    {
        // Finalize the accumulated input; a build failure surfaces as a
        // construction error rather than a service error.
        let input = self
            .inner
            .build()
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        // Convert the input into a dispatchable operation using the client config.
        let op = input
            .make_operation(&self.handle.conf)
            .await
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        self.handle.client.call(op).await
    }
    /// Create a paginator for this request
    ///
    /// Paginators are used by calling [`send().await`](crate::paginator::ListServersPaginator::send) which returns a [`Stream`](tokio_stream::Stream).
    pub fn into_paginator(self) -> crate::paginator::ListServersPaginator<C, M, R> {
        let Self { handle, inner } = self;
        crate::paginator::ListServersPaginator::new(handle, inner)
    }
    /// <p> Criteria for filtering servers. </p>
    pub fn server_criteria(mut self, input: crate::model::ServerCriteria) -> Self {
        let next = self.inner.server_criteria(input);
        self.inner = next;
        self
    }
    /// <p> Criteria for filtering servers. </p>
    pub fn set_server_criteria(
        mut self,
        input: std::option::Option<crate::model::ServerCriteria>,
    ) -> Self {
        let next = self.inner.set_server_criteria(input);
        self.inner = next;
        self
    }
    /// <p> Specifies the filter value, which is based on the type of server criteria. For example, if <code>serverCriteria</code> is <code>OS_NAME</code>, and the <code>filterValue</code> is equal to <code>WindowsServer</code>, then <code>ListServers</code> returns all of the servers matching the OS name <code>WindowsServer</code>. </p>
    pub fn filter_value(mut self, input: impl Into<std::string::String>) -> Self {
        let next = self.inner.filter_value(input.into());
        self.inner = next;
        self
    }
    /// <p> Specifies the filter value, which is based on the type of server criteria. For example, if <code>serverCriteria</code> is <code>OS_NAME</code>, and the <code>filterValue</code> is equal to <code>WindowsServer</code>, then <code>ListServers</code> returns all of the servers matching the OS name <code>WindowsServer</code>. </p>
    pub fn set_filter_value(mut self, input: std::option::Option<std::string::String>) -> Self {
        let next = self.inner.set_filter_value(input);
        self.inner = next;
        self
    }
    /// <p> Specifies whether to sort by ascending (<code>ASC</code>) or descending (<code>DESC</code>) order. </p>
    pub fn sort(mut self, input: crate::model::SortOrder) -> Self {
        let next = self.inner.sort(input);
        self.inner = next;
        self
    }
    /// <p> Specifies whether to sort by ascending (<code>ASC</code>) or descending (<code>DESC</code>) order. </p>
    pub fn set_sort(mut self, input: std::option::Option<crate::model::SortOrder>) -> Self {
        let next = self.inner.set_sort(input);
        self.inner = next;
        self
    }
    /// Appends an item to `groupIdFilter`.
    ///
    /// To override the contents of this collection use [`set_group_id_filter`](Self::set_group_id_filter).
    ///
    /// <p> Specifies the group ID to filter on. </p>
    pub fn group_id_filter(mut self, input: crate::model::Group) -> Self {
        let next = self.inner.group_id_filter(input);
        self.inner = next;
        self
    }
    /// <p> Specifies the group ID to filter on. </p>
    pub fn set_group_id_filter(
        mut self,
        input: std::option::Option<std::vec::Vec<crate::model::Group>>,
    ) -> Self {
        let next = self.inner.set_group_id_filter(input);
        self.inner = next;
        self
    }
    /// <p> The token from a previous call that you use to retrieve the next set of results. For example, if a previous call to this action returned 100 items, but you set <code>maxResults</code> to 10. You'll receive a set of 10 results along with a token. You then use the returned token to retrieve the next set of 10. </p>
    pub fn next_token(mut self, input: impl Into<std::string::String>) -> Self {
        let next = self.inner.next_token(input.into());
        self.inner = next;
        self
    }
    /// <p> The token from a previous call that you use to retrieve the next set of results. For example, if a previous call to this action returned 100 items, but you set <code>maxResults</code> to 10. You'll receive a set of 10 results along with a token. You then use the returned token to retrieve the next set of 10. </p>
    pub fn set_next_token(mut self, input: std::option::Option<std::string::String>) -> Self {
        let next = self.inner.set_next_token(input);
        self.inner = next;
        self
    }
    /// <p> The maximum number of items to include in the response. The maximum value is 100. </p>
    pub fn max_results(mut self, input: i32) -> Self {
        let next = self.inner.max_results(input);
        self.inner = next;
        self
    }
    /// <p> The maximum number of items to include in the response. The maximum value is 100. </p>
    pub fn set_max_results(mut self, input: std::option::Option<i32>) -> Self {
        let next = self.inner.set_max_results(input);
        self.inner = next;
        self
    }
}
/// Fluent builder constructing a request to `PutPortfolioPreferences`.
///
/// <p> Saves the specified migration and modernization preferences. </p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct PutPortfolioPreferences<
    C = aws_smithy_client::erase::DynConnector,
    M = crate::middleware::DefaultMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state: connector, middleware, retry policy and config.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates request parameters set through the fluent methods below.
    inner: crate::input::put_portfolio_preferences_input::Builder,
}
impl<C, M, R> PutPortfolioPreferences<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `PutPortfolioPreferences`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        let inner = crate::input::put_portfolio_preferences_input::Builder::default();
        Self { handle, inner }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::PutPortfolioPreferencesOutput,
        aws_smithy_http::result::SdkError<crate::error::PutPortfolioPreferencesError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::PutPortfolioPreferencesInputOperationOutputAlias,
            crate::output::PutPortfolioPreferencesOutput,
            crate::error::PutPortfolioPreferencesError,
            crate::input::PutPortfolioPreferencesInputOperationRetryAlias,
        >,
    {
        // Finalize the accumulated input; a build failure surfaces as a
        // construction error rather than a service error.
        let input = self
            .inner
            .build()
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        // Convert the input into a dispatchable operation using the client config.
        let op = input
            .make_operation(&self.handle.conf)
            .await
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        self.handle.client.call(op).await
    }
    /// <p> The rank of the business goals based on priority. </p>
    pub fn prioritize_business_goals(
        mut self,
        input: crate::model::PrioritizeBusinessGoals,
    ) -> Self {
        let next = self.inner.prioritize_business_goals(input);
        self.inner = next;
        self
    }
    /// <p> The rank of the business goals based on priority. </p>
    pub fn set_prioritize_business_goals(
        mut self,
        input: std::option::Option<crate::model::PrioritizeBusinessGoals>,
    ) -> Self {
        let next = self.inner.set_prioritize_business_goals(input);
        self.inner = next;
        self
    }
    /// <p> The transformation preferences for non-database applications. </p>
    pub fn application_preferences(
        mut self,
        input: crate::model::ApplicationPreferences,
    ) -> Self {
        let next = self.inner.application_preferences(input);
        self.inner = next;
        self
    }
    /// <p> The transformation preferences for non-database applications. </p>
    pub fn set_application_preferences(
        mut self,
        input: std::option::Option<crate::model::ApplicationPreferences>,
    ) -> Self {
        let next = self.inner.set_application_preferences(input);
        self.inner = next;
        self
    }
    /// <p> The transformation preferences for database applications. </p>
    pub fn database_preferences(mut self, input: crate::model::DatabasePreferences) -> Self {
        let next = self.inner.database_preferences(input);
        self.inner = next;
        self
    }
    /// <p> The transformation preferences for database applications. </p>
    pub fn set_database_preferences(
        mut self,
        input: std::option::Option<crate::model::DatabasePreferences>,
    ) -> Self {
        let next = self.inner.set_database_preferences(input);
        self.inner = next;
        self
    }
}
/// Fluent builder constructing a request to `StartAssessment`.
///
/// <p> Starts the assessment of an on-premises environment. </p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct StartAssessment<
    C = aws_smithy_client::erase::DynConnector,
    M = crate::middleware::DefaultMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client state: connector, middleware, retry policy and config.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates request parameters set through the fluent methods below.
    inner: crate::input::start_assessment_input::Builder,
}
impl<C, M, R> StartAssessment<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `StartAssessment`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        let inner = crate::input::start_assessment_input::Builder::default();
        Self { handle, inner }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::StartAssessmentOutput,
        aws_smithy_http::result::SdkError<crate::error::StartAssessmentError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::StartAssessmentInputOperationOutputAlias,
            crate::output::StartAssessmentOutput,
            crate::error::StartAssessmentError,
            crate::input::StartAssessmentInputOperationRetryAlias,
        >,
    {
        // Finalize the accumulated input; a build failure surfaces as a
        // construction error rather than a service error.
        let input = self
            .inner
            .build()
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        // Convert the input into a dispatchable operation using the client config.
        let op = input
            .make_operation(&self.handle.conf)
            .await
            .map_err(|e| aws_smithy_http::result::SdkError::ConstructionFailure(e.into()))?;
        self.handle.client.call(op).await
    }
    /// <p> The S3 bucket used by the collectors to send analysis data to the service. The bucket name must begin with <code>migrationhub-strategy-</code>. </p>
    pub fn s3bucket_for_analysis_data(mut self, input: impl Into<std::string::String>) -> Self {
        let next = self.inner.s3bucket_for_analysis_data(input.into());
        self.inner = next;
        self
    }
    /// <p> The S3 bucket used by the collectors to send analysis data to the service. The bucket name must begin with <code>migrationhub-strategy-</code>. </p>
    pub fn set_s3bucket_for_analysis_data(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        let next = self.inner.set_s3bucket_for_analysis_data(input);
        self.inner = next;
        self
    }
    /// <p> The S3 bucket where all the reports generated by the service are stored. The bucket name must begin with <code>migrationhub-strategy-</code>. </p>
    pub fn s3bucket_for_report_data(mut self, input: impl Into<std::string::String>) -> Self {
        let next = self.inner.s3bucket_for_report_data(input.into());
        self.inner = next;
        self
    }
    /// <p> The S3 bucket where all the reports generated by the service are stored. The bucket name must begin with <code>migrationhub-strategy-</code>. </p>
    pub fn set_s3bucket_for_report_data(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        let next = self.inner.set_s3bucket_for_report_data(input);
        self.inner = next;
        self
    }
}
/// Fluent builder constructing a request to `StartImportFileTask`.
///
/// <p> Starts a file import. </p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct StartImportFileTask<
C = aws_smithy_client::erase::DynConnector,
M = crate::middleware::DefaultMiddleware,
R = aws_smithy_client::retry::Standard,
> {
handle: std::sync::Arc<super::Handle<C, M, R>>,
inner: crate::input::start_import_file_task_input::Builder,
}
impl<C, M, R> StartImportFileTask<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `StartImportFileTask`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            handle,
            // Input starts empty; fields are supplied via the fluent setters below.
            inner: Default::default(),
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::StartImportFileTaskOutput,
        aws_smithy_http::result::SdkError<crate::error::StartImportFileTaskError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::StartImportFileTaskInputOperationOutputAlias,
            crate::output::StartImportFileTaskOutput,
            crate::error::StartImportFileTaskError,
            crate::input::StartImportFileTaskInputOperationRetryAlias,
        >,
    {
        // Build the input and turn it into an operation; failures here occur
        // before any request is sent and surface as `ConstructionFailure`.
        let op = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| {
                aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
            })?;
        // Dispatch through the shared smithy client (middleware + retry policy).
        self.handle.client.call(op).await
    }
    /// <p> A descriptive name for the request. </p>
    pub fn name(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.name(input.into());
        self
    }
    /// <p> A descriptive name for the request. </p>
    pub fn set_name(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_name(input);
        self
    }
    /// <p> The S3 bucket where the import file is located. The bucket name is required to begin with <code>migrationhub-strategy-</code>.</p>
    pub fn s3_bucket(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.s3_bucket(input.into());
        self
    }
    /// <p> The S3 bucket where the import file is located. The bucket name is required to begin with <code>migrationhub-strategy-</code>.</p>
    pub fn set_s3_bucket(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_s3_bucket(input);
        self
    }
    /// <p> The Amazon S3 key name of the import file. </p>
    pub fn s3key(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.s3key(input.into());
        self
    }
    /// <p> The Amazon S3 key name of the import file. </p>
    pub fn set_s3key(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_s3key(input);
        self
    }
    /// <p>Specifies the source that the servers are coming from. By default, Strategy Recommendations assumes that the servers specified in the import file are available in AWS Application Discovery Service. </p>
    pub fn data_source_type(mut self, input: crate::model::DataSourceType) -> Self {
        self.inner = self.inner.data_source_type(input);
        self
    }
    /// <p>Specifies the source that the servers are coming from. By default, Strategy Recommendations assumes that the servers specified in the import file are available in AWS Application Discovery Service. </p>
    pub fn set_data_source_type(
        mut self,
        input: std::option::Option<crate::model::DataSourceType>,
    ) -> Self {
        self.inner = self.inner.set_data_source_type(input);
        self
    }
    /// Appends an item to `groupId`.
    ///
    /// To override the contents of this collection use [`set_group_id`](Self::set_group_id).
    ///
    /// <p>Groups the resources in the import file together with a unique name. This ID can be as filter in <code>ListApplicationComponents</code> and <code>ListServers</code>. </p>
    pub fn group_id(mut self, input: crate::model::Group) -> Self {
        self.inner = self.inner.group_id(input);
        self
    }
    /// <p>Groups the resources in the import file together with a unique name. This ID can be as filter in <code>ListApplicationComponents</code> and <code>ListServers</code>. </p>
    pub fn set_group_id(
        mut self,
        input: std::option::Option<std::vec::Vec<crate::model::Group>>,
    ) -> Self {
        self.inner = self.inner.set_group_id(input);
        self
    }
    /// <p> The S3 bucket where Strategy Recommendations uploads import results. The bucket name is required to begin with migrationhub-strategy-. </p>
    pub fn s3bucket_for_report_data(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.s3bucket_for_report_data(input.into());
        self
    }
    /// <p> The S3 bucket where Strategy Recommendations uploads import results. The bucket name is required to begin with migrationhub-strategy-. </p>
    pub fn set_s3bucket_for_report_data(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_s3bucket_for_report_data(input);
        self
    }
}
/// Fluent builder constructing a request to `StartRecommendationReportGeneration`.
///
/// <p> Starts generating a recommendation report. </p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct StartRecommendationReportGeneration<
    // `C`: HTTP connector, `M`: middleware stack, `R`: retry policy.
    // Defaults match the crate-wide smithy client configuration.
    C = aws_smithy_client::erase::DynConnector,
    M = crate::middleware::DefaultMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client + service config; reused by every fluent builder.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates the operation input as setter methods are chained.
    inner: crate::input::start_recommendation_report_generation_input::Builder,
}
impl<C, M, R> StartRecommendationReportGeneration<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `StartRecommendationReportGeneration`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            handle,
            // Input starts empty; fields are supplied via the fluent setters below.
            inner: Default::default(),
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::StartRecommendationReportGenerationOutput,
        aws_smithy_http::result::SdkError<
            crate::error::StartRecommendationReportGenerationError,
        >,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::StartRecommendationReportGenerationInputOperationOutputAlias,
            crate::output::StartRecommendationReportGenerationOutput,
            crate::error::StartRecommendationReportGenerationError,
            crate::input::StartRecommendationReportGenerationInputOperationRetryAlias,
        >,
    {
        // Build the input and turn it into an operation; failures here occur
        // before any request is sent and surface as `ConstructionFailure`.
        let op = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| {
                aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
            })?;
        // Dispatch through the shared smithy client (middleware + retry policy).
        self.handle.client.call(op).await
    }
    /// <p> The output format for the recommendation report file. The default format is Microsoft Excel. </p>
    pub fn output_format(mut self, input: crate::model::OutputFormat) -> Self {
        self.inner = self.inner.output_format(input);
        self
    }
    /// <p> The output format for the recommendation report file. The default format is Microsoft Excel. </p>
    pub fn set_output_format(
        mut self,
        input: std::option::Option<crate::model::OutputFormat>,
    ) -> Self {
        self.inner = self.inner.set_output_format(input);
        self
    }
    /// Appends an item to `groupIdFilter`.
    ///
    /// To override the contents of this collection use [`set_group_id_filter`](Self::set_group_id_filter).
    ///
    /// <p> Groups the resources in the recommendation report with a unique name. </p>
    pub fn group_id_filter(mut self, input: crate::model::Group) -> Self {
        self.inner = self.inner.group_id_filter(input);
        self
    }
    /// <p> Groups the resources in the recommendation report with a unique name. </p>
    pub fn set_group_id_filter(
        mut self,
        input: std::option::Option<std::vec::Vec<crate::model::Group>>,
    ) -> Self {
        self.inner = self.inner.set_group_id_filter(input);
        self
    }
}
/// Fluent builder constructing a request to `StopAssessment`.
///
/// <p> Stops the assessment of an on-premises environment. </p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct StopAssessment<
    // `C`: HTTP connector, `M`: middleware stack, `R`: retry policy.
    // Defaults match the crate-wide smithy client configuration.
    C = aws_smithy_client::erase::DynConnector,
    M = crate::middleware::DefaultMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client + service config; reused by every fluent builder.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates the operation input as setter methods are chained.
    inner: crate::input::stop_assessment_input::Builder,
}
impl<C, M, R> StopAssessment<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `StopAssessment`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            handle,
            // Input starts empty; fields are supplied via the fluent setters below.
            inner: Default::default(),
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::StopAssessmentOutput,
        aws_smithy_http::result::SdkError<crate::error::StopAssessmentError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::StopAssessmentInputOperationOutputAlias,
            crate::output::StopAssessmentOutput,
            crate::error::StopAssessmentError,
            crate::input::StopAssessmentInputOperationRetryAlias,
        >,
    {
        // Build the input and turn it into an operation; failures here occur
        // before any request is sent and surface as `ConstructionFailure`.
        let op = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| {
                aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
            })?;
        // Dispatch through the shared smithy client (middleware + retry policy).
        self.handle.client.call(op).await
    }
    /// <p> The <code>assessmentId</code> returned by <code>StartAssessment</code>. </p>
    pub fn assessment_id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.assessment_id(input.into());
        self
    }
    /// <p> The <code>assessmentId</code> returned by <code>StartAssessment</code>. </p>
    pub fn set_assessment_id(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_assessment_id(input);
        self
    }
}
/// Fluent builder constructing a request to `UpdateApplicationComponentConfig`.
///
/// <p> Updates the configuration of an application component. </p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct UpdateApplicationComponentConfig<
    // `C`: HTTP connector, `M`: middleware stack, `R`: retry policy.
    // Defaults match the crate-wide smithy client configuration.
    C = aws_smithy_client::erase::DynConnector,
    M = crate::middleware::DefaultMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client + service config; reused by every fluent builder.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates the operation input as setter methods are chained.
    inner: crate::input::update_application_component_config_input::Builder,
}
impl<C, M, R> UpdateApplicationComponentConfig<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `UpdateApplicationComponentConfig`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            handle,
            // Input starts empty; fields are supplied via the fluent setters below.
            inner: Default::default(),
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::UpdateApplicationComponentConfigOutput,
        aws_smithy_http::result::SdkError<crate::error::UpdateApplicationComponentConfigError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::UpdateApplicationComponentConfigInputOperationOutputAlias,
            crate::output::UpdateApplicationComponentConfigOutput,
            crate::error::UpdateApplicationComponentConfigError,
            crate::input::UpdateApplicationComponentConfigInputOperationRetryAlias,
        >,
    {
        // Build the input and turn it into an operation; failures here occur
        // before any request is sent and surface as `ConstructionFailure`.
        let op = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| {
                aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
            })?;
        // Dispatch through the shared smithy client (middleware + retry policy).
        self.handle.client.call(op).await
    }
    /// <p> The ID of the application component. The ID is unique within an AWS account. </p>
    pub fn application_component_id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.application_component_id(input.into());
        self
    }
    /// <p> The ID of the application component. The ID is unique within an AWS account. </p>
    pub fn set_application_component_id(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_application_component_id(input);
        self
    }
    /// <p> Indicates whether the application component has been included for server recommendation or not. </p>
    pub fn inclusion_status(mut self, input: crate::model::InclusionStatus) -> Self {
        self.inner = self.inner.inclusion_status(input);
        self
    }
    /// <p> Indicates whether the application component has been included for server recommendation or not. </p>
    pub fn set_inclusion_status(
        mut self,
        input: std::option::Option<crate::model::InclusionStatus>,
    ) -> Self {
        self.inner = self.inner.set_inclusion_status(input);
        self
    }
    /// <p> The preferred strategy options for the application component. Use values from the <code>GetApplicationComponentStrategies</code> response. </p>
    pub fn strategy_option(mut self, input: crate::model::StrategyOption) -> Self {
        self.inner = self.inner.strategy_option(input);
        self
    }
    /// <p> The preferred strategy options for the application component. Use values from the <code>GetApplicationComponentStrategies</code> response. </p>
    pub fn set_strategy_option(
        mut self,
        input: std::option::Option<crate::model::StrategyOption>,
    ) -> Self {
        self.inner = self.inner.set_strategy_option(input);
        self
    }
    /// Appends an item to `sourceCodeList`.
    ///
    /// To override the contents of this collection use [`set_source_code_list`](Self::set_source_code_list).
    ///
    /// <p> The list of source code configurations to update for the application component. </p>
    pub fn source_code_list(mut self, input: crate::model::SourceCode) -> Self {
        self.inner = self.inner.source_code_list(input);
        self
    }
    /// <p> The list of source code configurations to update for the application component. </p>
    pub fn set_source_code_list(
        mut self,
        input: std::option::Option<std::vec::Vec<crate::model::SourceCode>>,
    ) -> Self {
        self.inner = self.inner.set_source_code_list(input);
        self
    }
    /// <p> Database credentials. </p>
    pub fn secrets_manager_key(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.secrets_manager_key(input.into());
        self
    }
    /// <p> Database credentials. </p>
    pub fn set_secrets_manager_key(
        mut self,
        input: std::option::Option<std::string::String>,
    ) -> Self {
        self.inner = self.inner.set_secrets_manager_key(input);
        self
    }
}
/// Fluent builder constructing a request to `UpdateServerConfig`.
///
/// <p> Updates the configuration of the specified server. </p>
#[derive(std::clone::Clone, std::fmt::Debug)]
pub struct UpdateServerConfig<
    // `C`: HTTP connector, `M`: middleware stack, `R`: retry policy.
    // Defaults match the crate-wide smithy client configuration.
    C = aws_smithy_client::erase::DynConnector,
    M = crate::middleware::DefaultMiddleware,
    R = aws_smithy_client::retry::Standard,
> {
    // Shared client + service config; reused by every fluent builder.
    handle: std::sync::Arc<super::Handle<C, M, R>>,
    // Accumulates the operation input as setter methods are chained.
    inner: crate::input::update_server_config_input::Builder,
}
impl<C, M, R> UpdateServerConfig<C, M, R>
where
    C: aws_smithy_client::bounds::SmithyConnector,
    M: aws_smithy_client::bounds::SmithyMiddleware<C>,
    R: aws_smithy_client::retry::NewRequestPolicy,
{
    /// Creates a new `UpdateServerConfig`.
    pub(crate) fn new(handle: std::sync::Arc<super::Handle<C, M, R>>) -> Self {
        Self {
            handle,
            // Input starts empty; fields are supplied via the fluent setters below.
            inner: Default::default(),
        }
    }
    /// Sends the request and returns the response.
    ///
    /// If an error occurs, an `SdkError` will be returned with additional details that
    /// can be matched against.
    ///
    /// By default, any retryable failures will be retried twice. Retry behavior
    /// is configurable with the [RetryConfig](aws_smithy_types::retry::RetryConfig), which can be
    /// set when configuring the client.
    pub async fn send(
        self,
    ) -> std::result::Result<
        crate::output::UpdateServerConfigOutput,
        aws_smithy_http::result::SdkError<crate::error::UpdateServerConfigError>,
    >
    where
        R::Policy: aws_smithy_client::bounds::SmithyRetryPolicy<
            crate::input::UpdateServerConfigInputOperationOutputAlias,
            crate::output::UpdateServerConfigOutput,
            crate::error::UpdateServerConfigError,
            crate::input::UpdateServerConfigInputOperationRetryAlias,
        >,
    {
        // Build the input and turn it into an operation; failures here occur
        // before any request is sent and surface as `ConstructionFailure`.
        let op = self
            .inner
            .build()
            .map_err(|err| aws_smithy_http::result::SdkError::ConstructionFailure(err.into()))?
            .make_operation(&self.handle.conf)
            .await
            .map_err(|err| {
                aws_smithy_http::result::SdkError::ConstructionFailure(err.into())
            })?;
        // Dispatch through the shared smithy client (middleware + retry policy).
        self.handle.client.call(op).await
    }
    /// <p> The ID of the server. </p>
    pub fn server_id(mut self, input: impl Into<std::string::String>) -> Self {
        self.inner = self.inner.server_id(input.into());
        self
    }
    /// <p> The ID of the server. </p>
    pub fn set_server_id(mut self, input: std::option::Option<std::string::String>) -> Self {
        self.inner = self.inner.set_server_id(input);
        self
    }
    /// <p> The preferred strategy options for the application component. See the response from <code>GetServerStrategies</code>.</p>
    pub fn strategy_option(mut self, input: crate::model::StrategyOption) -> Self {
        self.inner = self.inner.strategy_option(input);
        self
    }
    /// <p> The preferred strategy options for the application component. See the response from <code>GetServerStrategies</code>.</p>
    pub fn set_strategy_option(
        mut self,
        input: std::option::Option<crate::model::StrategyOption>,
    ) -> Self {
        self.inner = self.inner.set_strategy_option(input);
        self
    }
}
}
impl<C> Client<C, crate::middleware::DefaultMiddleware, aws_smithy_client::retry::Standard> {
    /// Creates a client with the given service config and connector override.
    pub fn from_conf_conn(conf: crate::Config, conn: C) -> Self {
        // Missing retry/timeout settings fall back to their defaults.
        let retry_config = conf.retry_config.as_ref().cloned().unwrap_or_default();
        let timeout_config = conf.timeout_config.as_ref().cloned().unwrap_or_default();
        let sleep_impl = conf.sleep_impl.clone();
        let mut builder = aws_smithy_client::Builder::new()
            .connector(conn)
            .middleware(crate::middleware::DefaultMiddleware::new());
        builder.set_retry_config(retry_config.into());
        builder.set_timeout_config(timeout_config);
        // Only override the sleep implementation when one was actually configured.
        if let Some(sleep_impl) = sleep_impl {
            builder.set_sleep_impl(Some(sleep_impl));
        }
        let client = builder.build();
        Self {
            handle: std::sync::Arc::new(Handle { client, conf }),
        }
    }
}
impl
    Client<
        aws_smithy_client::erase::DynConnector,
        crate::middleware::DefaultMiddleware,
        aws_smithy_client::retry::Standard,
    >
{
    /// Creates a new client from a shared config.
    #[cfg(any(feature = "rustls", feature = "native-tls"))]
    pub fn new(config: &aws_types::config::Config) -> Self {
        Self::from_conf(config.into())
    }
    /// Creates a new client from the service [`Config`](crate::Config).
    #[cfg(any(feature = "rustls", feature = "native-tls"))]
    pub fn from_conf(conf: crate::Config) -> Self {
        // Missing retry/timeout settings fall back to their defaults.
        let retry_config = conf.retry_config.as_ref().cloned().unwrap_or_default();
        let timeout_config = conf.timeout_config.as_ref().cloned().unwrap_or_default();
        let sleep_impl = conf.sleep_impl.clone();
        // `dyn_https` supplies the default TLS-capable HTTPS connector.
        let mut builder = aws_smithy_client::Builder::dyn_https()
            .middleware(crate::middleware::DefaultMiddleware::new());
        builder.set_retry_config(retry_config.into());
        builder.set_timeout_config(timeout_config);
        // the builder maintains a try-state. To avoid suppressing the warning when sleep is unset,
        // only set it if we actually have a sleep impl.
        if let Some(sleep_impl) = sleep_impl {
            builder.set_sleep_impl(Some(sleep_impl));
        }
        let client = builder.build();
        Self {
            handle: std::sync::Arc::new(Handle { client, conf }),
        }
    }
}
| 47.140885 | 343 | 0.60919 |
16f3a9e9be43d285232d74519f9384a29ba9f784
| 707 |
use std::{
collections::HashSet,
marker::PhantomData,
};
use crate::{
pack::*,
class::*,
shape::*,
};
#[derive(Clone, Debug, Default)]
// Minimal `Shape` implementation for tests; carries no runtime data.
// `PhantomData<T>` ties the shape to an element type without storing a value.
pub struct TestShape<T: 'static> {
    phantom: PhantomData<T>,
}
impl<T> TestShape<T> {
pub fn new() -> Self {
Self { phantom: PhantomData }
}
}
// Marker implementation: `TestShape` opts into the `Shape` trait with no overrides.
impl<T> Shape for TestShape<T> {}
impl<T> Instance<ShapeClass> for TestShape<T> {
    /// The test shape contributes no generated source.
    fn source(_: &mut HashSet<u64>) -> String {
        String::new()
    }
    /// Fixed instance name identifying the test shape.
    fn inst_name() -> String {
        String::from("test_shape")
    }
}
impl<T> Pack for TestShape<T> {
    // The test shape packs no data at all: zero int slots, zero float slots,
    // and `pack_to` is a no-op.
    fn size_int() -> usize { 0 }
    fn size_float() -> usize { 0 }
    fn pack_to(&self, _buffer_int: &mut [i32], _buffer_float: &mut [f32]) {}
}
| 20.794118 | 76 | 0.588402 |
bfdf008f52abe05529223588cc74bfc1c6d0c392
| 3,702 |
use crate::{
rpc::{typed_data::Data, TypedData},
timer::ScheduleStatus,
};
use serde::Deserialize;
use serde_json::from_str;
use std::collections::HashMap;
/// Represents the timer information from a timer trigger binding.
///
/// The following binding attributes are supported:
///
/// | Name | Description |
/// |------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
/// | `name` | The name of the parameter being bound. |
/// | `schedule` | The CRON expression or a TimeSpan value for the timer. A TimeSpan can be used only for a function app that runs on an App Service Plan. |
/// | `run_on_startup` | If `true`, the function is invoked when the runtime starts. It should rarely, if ever, be set to `true` in production as the function will be invoked on runtime restarts and scale outs. |
/// | `use_monitor` | Set to `true` or `false` to indicate whether the schedule should be monitored. Schedule monitoring persists schedule occurrences to aid in ensuring the schedule is maintained correctly even when function app instances restart. |
///
/// # Examples
///
/// A function that runs every 5 minutes:
///
/// ```rust
/// use azure_functions::bindings::TimerInfo;
/// use azure_functions::func;
/// use log::info;
///
/// #[func]
/// #[binding(name = "_info", schedule = "0 */5 * * * *")]
/// pub fn timer(_info: TimerInfo) {
/// info!("Rust Azure function ran!");
/// }
/// ```
#[derive(Debug, Deserialize)]
#[serde(rename_all = "PascalCase")]
pub struct TimerInfo {
    /// The schedule status for the timer.
    ///
    /// NOTE(review): this field is declared as a plain `ScheduleStatus`, not
    /// `Option<ScheduleStatus>`, so if schedule monitoring is disabled and the
    /// host omits the field, deserialization would presumably fail rather than
    /// yield `None` — confirm the intended behavior.
    pub schedule_status: ScheduleStatus,
    /// Determines if the timer invocation is due to a missed schedule occurrence.
    pub is_past_due: bool,
}
impl TimerInfo {
    #[doc(hidden)]
    pub fn new(data: TypedData, _: HashMap<String, TypedData>) -> Self {
        // Timer triggers always deliver their payload as JSON; anything else
        // indicates a host/binding mismatch and is treated as unrecoverable.
        if let Some(Data::Json(payload)) = &data.data {
            from_str(payload).expect("failed to parse timer JSON data")
        } else {
            panic!("expected JSON data for timer trigger binding")
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn it_has_json_data() {
        // `&'static` is implied for string literals in a `const` item
        // (clippy: redundant_static_lifetimes), so the explicit lifetime is dropped.
        const JSON: &str = r#"{"ScheduleStatus":{"Last":"0001-01-01T00:00:00","Next":"2018-07-24T23:24:00-07:00","LastUpdated":"0001-01-01T00:00:00"},"IsPastDue":true}"#;
        let data = TypedData {
            data: Some(Data::Json(JSON.to_string())),
        };
        let info = TimerInfo::new(data, HashMap::new());
        assert!(info.is_past_due);
        // Timestamps are normalized to UTC: the `-07:00` offset on `Next`
        // becomes `+00:00` after deserialization.
        assert_eq!(
            info.schedule_status.last.to_rfc3339(),
            "0001-01-01T00:00:00+00:00"
        );
        assert_eq!(
            info.schedule_status.next.to_rfc3339(),
            "2018-07-25T06:24:00+00:00"
        );
        assert_eq!(
            info.schedule_status.last_updated.to_rfc3339(),
            "0001-01-01T00:00:00+00:00"
        );
    }
}
| 43.046512 | 253 | 0.490816 |
cc4d1167a4e68b0aa80d2145e50e85e41875b740
| 5,569 |
// Copyright 2015-2019 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.
// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.
//! Definition of valid items for the verification queue.
use engine::Engine;
use parity_util_mem::MallocSizeOf;
use ethereum_types::{H256, U256};
use common_types::errors::EthcoreError as Error;
pub use self::blocks::Blocks;
pub use self::headers::Headers;
/// Something which can produce a hash and a parent hash.
///
/// Implemented below for `Unverified`, `PreverifiedBlock` and `Header`, which
/// lets the verification queue treat all stages uniformly.
pub trait BlockLike {
    /// Get the hash of this item.
    fn hash(&self) -> H256;
    /// Get the hash of this item's parent.
    fn parent_hash(&self) -> H256;
    /// Get the difficulty of this item.
    fn difficulty(&self) -> U256;
}
/// Defines transitions between stages of verification.
///
/// It starts with a fallible transformation from an "input" into the unverified item.
/// This consists of quick, simply done checks as well as extracting particular data.
///
/// Then, there is a `verify` function which performs more expensive checks and
/// produces the verified output.
///
/// For correctness, the hashes produced by each stage of the pipeline should be
/// consistent.
pub trait Kind: 'static + Sized + Send + Sync {
    /// The first stage: completely unverified.
    type Input: Sized + Send + BlockLike + MallocSizeOf;
    /// The second stage: partially verified.
    type Unverified: Sized + Send + BlockLike + MallocSizeOf;
    /// The third stage: completely verified.
    type Verified: Sized + Send + BlockLike + MallocSizeOf;
    /// Attempt to create the `Unverified` item from the input.
    ///
    /// On failure the original input is handed back together with the error,
    /// so the caller retains ownership of it.
    fn create(input: Self::Input, engine: &dyn Engine, check_seal: bool) -> Result<Self::Unverified, (Self::Input, Error)>;
    /// Attempt to verify the `Unverified` item using the given engine.
    fn verify(unverified: Self::Unverified, engine: &dyn Engine, check_seal: bool) -> Result<Self::Verified, Error>;
}
/// The blocks verification module.
pub mod blocks {
    use super::{Kind, BlockLike};
    use engine::Engine;
    use common_types::{
        block::PreverifiedBlock,
        errors::{EthcoreError as Error, BlockError},
        verification::Unverified,
    };
    use log::{debug, warn};
    use crate::verification::{verify_block_basic, verify_block_unordered};
    use ethereum_types::{H256, U256};
    /// A mode for verifying blocks.
    pub struct Blocks;
    impl Kind for Blocks {
        type Input = Unverified;
        type Unverified = Unverified;
        type Verified = PreverifiedBlock;
        fn create(input: Self::Input, engine: &dyn Engine, check_seal: bool) -> Result<Self::Unverified, (Self::Input, Error)> {
            match verify_block_basic(&input, engine, check_seal) {
                Ok(()) => Ok(input),
                Err(Error::Block(BlockError::TemporarilyInvalid(oob))) => {
                    // "Received too early" is an expected condition, so it is
                    // logged at debug level rather than warn like other failures.
                    debug!(target: "client", "Block received too early {}: {:?}", input.hash(), oob);
                    Err((input, BlockError::TemporarilyInvalid(oob).into()))
                },
                Err(e) => {
                    warn!(target: "client", "Stage 1 block verification failed for {}: {:?}", input.hash(), e);
                    Err((input, e))
                }
            }
        }
        fn verify(un: Self::Unverified, engine: &dyn Engine, check_seal: bool) -> Result<Self::Verified, Error> {
            // Capture the hash up front: `un` is moved into
            // `verify_block_unordered` and is unavailable in the error branch.
            let hash = un.hash();
            match verify_block_unordered(un, engine, check_seal) {
                Ok(verified) => Ok(verified),
                Err(e) => {
                    warn!(target: "client", "Stage 2 block verification failed for {}: {:?}", hash, e);
                    Err(e)
                }
            }
        }
    }
    impl BlockLike for Unverified {
        fn hash(&self) -> H256 {
            self.header.hash()
        }
        fn parent_hash(&self) -> H256 {
            self.header.parent_hash().clone()
        }
        fn difficulty(&self) -> U256 {
            self.header.difficulty().clone()
        }
    }
    impl BlockLike for PreverifiedBlock {
        fn hash(&self) -> H256 {
            self.header.hash()
        }
        fn parent_hash(&self) -> H256 {
            self.header.parent_hash().clone()
        }
        fn difficulty(&self) -> U256 {
            self.header.difficulty().clone()
        }
    }
}
/// Verification for headers.
pub mod headers {
    use super::{Kind, BlockLike};
    use engine::Engine;
    use common_types::{
        header::Header,
        errors::EthcoreError as Error,
    };
    use crate::verification::verify_header_params;
    use ethereum_types::{H256, U256};
    impl BlockLike for Header {
        // `self.hash()` resolves to the inherent `Header::hash`, not to this
        // trait method, so there is no recursion here.
        fn hash(&self) -> H256 { self.hash() }
        // `H256`/`U256` are `Copy`; dereferencing replaces the needless
        // `.clone()` calls (clippy: clone_on_copy).
        fn parent_hash(&self) -> H256 { *self.parent_hash() }
        fn difficulty(&self) -> U256 { *self.difficulty() }
    }
    /// A mode for verifying headers.
    pub struct Headers;
    impl Kind for Headers {
        type Input = Header;
        type Unverified = Header;
        type Verified = Header;
        fn create(input: Self::Input, engine: &dyn Engine, check_seal: bool) -> Result<Self::Unverified, (Self::Input, Error)> {
            // On failure the header is handed back alongside the error so the
            // caller keeps ownership of it.
            match verify_header_params(&input, engine, true, check_seal) {
                Ok(_) => Ok(input),
                Err(err) => Err((input, err))
            }
        }
        fn verify(unverified: Self::Unverified, engine: &dyn Engine, check_seal: bool) -> Result<Self::Verified, Error> {
            // `if` instead of matching on a bool (clippy: match_bool); also
            // drops a stray trailing comma from the original call.
            if check_seal {
                engine.verify_block_unordered(&unverified).map(|_| unverified)
            } else {
                Ok(unverified)
            }
        }
    }
}
| 29.62234 | 122 | 0.685401 |
5de09806ddf304f4ee25aeece8994bde1ed5da29
| 131,851 |
// DO NOT EDIT !
// This file was generated automatically from 'src/mako/cli/main.rs.mako'
// DO NOT EDIT !
#![allow(unused_variables, unused_imports, dead_code, unused_mut)]
extern crate tokio;
#[macro_use]
extern crate clap;
use std::env;
use std::io::{self, Write};
use clap::{App, SubCommand, Arg};
use google_jobs3::{api, Error, oauth2};
mod client;
use client::{InvalidOptionsError, CLIError, arg_from_str, writer_from_opts, parse_kv_arg,
input_file_from_opts, input_mime_from_opts, FieldCursor, FieldError, CallType, UploadProtocol,
calltype_from_str, remove_json_null_values, ComplexType, JsonType, JsonTypeInfo};
use std::default::Default;
use std::str::FromStr;
use serde_json as json;
use clap::ArgMatches;
enum DoitError {
    // Failure while writing output to the chosen destination (destination name, error).
    IoError(String, io::Error),
    // Failure reported by the remote API call itself.
    ApiError(Error),
}
struct Engine<'n> {
    // Parsed command-line arguments for this invocation.
    opt: ArgMatches<'n>,
    // API hub used to issue the actual calls.
    hub: api::CloudTalentSolution,
    // Accepted global parameter names (used to validate `-v key=value` args).
    gp: Vec<&'static str>,
    // Maps CLI parameter names to their remote (API) parameter names.
    gpm: Vec<(&'static str, &'static str)>,
}
impl<'n> Engine<'n> {
    // Handles the `projects client-events-create` CLI subcommand: builds a
    // `CreateClientEventRequest` from repeated `key=value` arguments, applies
    // global parameters and scopes, then issues the call and pretty-prints the
    // JSON response. With `dry_run` set, only argument validation is performed.
    async fn _projects_client_events_create(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError)
                                                    -> Result<(), DoitError> {
        let mut field_cursor = FieldCursor::default();
        let mut object = json::value::Value::Object(Default::default());
        // Fold each `-r key=value` pair into the request JSON object, using the
        // cursor to address nested fields.
        for kvarg in opt.values_of("kv").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
            let last_errc = err.issues.len();
            let (key, value) = parse_kv_arg(&*kvarg, err, false);
            let mut temp_cursor = field_cursor.clone();
            if let Err(field_err) = temp_cursor.set(&*key) {
                err.issues.push(field_err);
            }
            // A key with no value only moves the cursor; discard any error that
            // was recorded for it above.
            if value.is_none() {
                field_cursor = temp_cursor.clone();
                if err.issues.len() > last_errc {
                    err.issues.remove(last_errc);
                }
                continue;
            }
            // Map the cursor path to the corresponding remote field name and
            // JSON type; unknown paths produce a "did you mean" suggestion.
            let type_info: Option<(&'static str, JsonTypeInfo)> =
                match &temp_cursor.to_string()[..] {
                    "client-event.create-time" => Some(("clientEvent.createTime", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "client-event.event-id" => Some(("clientEvent.eventId", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "client-event.extra-info" => Some(("clientEvent.extraInfo", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Map })),
                    "client-event.job-event.jobs" => Some(("clientEvent.jobEvent.jobs", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })),
                    "client-event.job-event.type" => Some(("clientEvent.jobEvent.type", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "client-event.parent-event-id" => Some(("clientEvent.parentEventId", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "client-event.request-id" => Some(("clientEvent.requestId", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    _ => {
                        let suggestion = FieldCursor::did_you_mean(key, &vec!["client-event", "create-time", "event-id", "extra-info", "job-event", "jobs", "parent-event-id", "request-id", "type"]);
                        err.issues.push(CLIError::Field(FieldError::Unknown(temp_cursor.to_string(), suggestion, value.map(|v| v.to_string()))));
                        None
                    }
                };
            if let Some((field_cursor_str, type_info)) = type_info {
                FieldCursor::from(field_cursor_str).set_json_value(&mut object, value.unwrap(), type_info, err, &temp_cursor);
            }
        }
        let mut request: api::CreateClientEventRequest = json::value::from_value(object).unwrap();
        let mut call = self.hub.projects().client_events_create(request, opt.value_of("parent").unwrap_or(""));
        // Apply `-v key=value` arguments; only globally-known parameters (gp)
        // are accepted, translated to their remote names via gpm.
        for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
            let (key, value) = parse_kv_arg(&*parg, err, false);
            match key {
                _ => {
                    let mut found = false;
                    for param in &self.gp {
                        if key == *param {
                            found = true;
                            call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset"));
                            break;
                        }
                    }
                    if !found {
                        err.issues.push(CLIError::UnknownParameter(key.to_string(),
                                                                  {let mut v = Vec::new();
                                                                           v.extend(self.gp.iter().map(|v|*v));
                                                                           v } ));
                    }
                }
            }
        }
        let protocol = CallType::Standard;
        if dry_run {
            // Validation-only mode: no request is made.
            Ok(())
        } else {
            assert!(err.issues.len() == 0);
            for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
                call = call.add_scope(scope);
            }
            let mut ostream = match writer_from_opts(opt.value_of("out")) {
                Ok(mut f) => f,
                Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)),
            };
            match match protocol {
                CallType::Standard => call.doit().await,
                _ => unreachable!()
            } {
                Err(api_err) => Err(DoitError::ApiError(api_err)),
                Ok((mut response, output_schema)) => {
                    // Strip nulls so the printed JSON only shows populated fields.
                    let mut value = json::value::to_value(&output_schema).expect("serde to work");
                    remove_json_null_values(&mut value);
                    json::to_writer_pretty(&mut ostream, &value).unwrap();
                    ostream.flush().unwrap();
                    Ok(())
                }
            }
        }
    }
async fn _projects_companies_create(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError)
-> Result<(), DoitError> {
let mut field_cursor = FieldCursor::default();
let mut object = json::value::Value::Object(Default::default());
for kvarg in opt.values_of("kv").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
let last_errc = err.issues.len();
let (key, value) = parse_kv_arg(&*kvarg, err, false);
let mut temp_cursor = field_cursor.clone();
if let Err(field_err) = temp_cursor.set(&*key) {
err.issues.push(field_err);
}
if value.is_none() {
field_cursor = temp_cursor.clone();
if err.issues.len() > last_errc {
err.issues.remove(last_errc);
}
continue;
}
let type_info: Option<(&'static str, JsonTypeInfo)> =
match &temp_cursor.to_string()[..] {
"company.career-site-uri" => Some(("company.careerSiteUri", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"company.derived-info.headquarters-location.lat-lng.latitude" => Some(("company.derivedInfo.headquartersLocation.latLng.latitude", JsonTypeInfo { jtype: JsonType::Float, ctype: ComplexType::Pod })),
"company.derived-info.headquarters-location.lat-lng.longitude" => Some(("company.derivedInfo.headquartersLocation.latLng.longitude", JsonTypeInfo { jtype: JsonType::Float, ctype: ComplexType::Pod })),
"company.derived-info.headquarters-location.location-type" => Some(("company.derivedInfo.headquartersLocation.locationType", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"company.derived-info.headquarters-location.postal-address.address-lines" => Some(("company.derivedInfo.headquartersLocation.postalAddress.addressLines", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })),
"company.derived-info.headquarters-location.postal-address.administrative-area" => Some(("company.derivedInfo.headquartersLocation.postalAddress.administrativeArea", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"company.derived-info.headquarters-location.postal-address.language-code" => Some(("company.derivedInfo.headquartersLocation.postalAddress.languageCode", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"company.derived-info.headquarters-location.postal-address.locality" => Some(("company.derivedInfo.headquartersLocation.postalAddress.locality", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"company.derived-info.headquarters-location.postal-address.organization" => Some(("company.derivedInfo.headquartersLocation.postalAddress.organization", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"company.derived-info.headquarters-location.postal-address.postal-code" => Some(("company.derivedInfo.headquartersLocation.postalAddress.postalCode", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"company.derived-info.headquarters-location.postal-address.recipients" => Some(("company.derivedInfo.headquartersLocation.postalAddress.recipients", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })),
"company.derived-info.headquarters-location.postal-address.region-code" => Some(("company.derivedInfo.headquartersLocation.postalAddress.regionCode", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"company.derived-info.headquarters-location.postal-address.revision" => Some(("company.derivedInfo.headquartersLocation.postalAddress.revision", JsonTypeInfo { jtype: JsonType::Int, ctype: ComplexType::Pod })),
"company.derived-info.headquarters-location.postal-address.sorting-code" => Some(("company.derivedInfo.headquartersLocation.postalAddress.sortingCode", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"company.derived-info.headquarters-location.postal-address.sublocality" => Some(("company.derivedInfo.headquartersLocation.postalAddress.sublocality", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"company.derived-info.headquarters-location.radius-in-miles" => Some(("company.derivedInfo.headquartersLocation.radiusInMiles", JsonTypeInfo { jtype: JsonType::Float, ctype: ComplexType::Pod })),
"company.display-name" => Some(("company.displayName", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"company.eeo-text" => Some(("company.eeoText", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"company.external-id" => Some(("company.externalId", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"company.headquarters-address" => Some(("company.headquartersAddress", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"company.hiring-agency" => Some(("company.hiringAgency", JsonTypeInfo { jtype: JsonType::Boolean, ctype: ComplexType::Pod })),
"company.image-uri" => Some(("company.imageUri", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"company.keyword-searchable-job-custom-attributes" => Some(("company.keywordSearchableJobCustomAttributes", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })),
"company.name" => Some(("company.name", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"company.size" => Some(("company.size", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"company.suspended" => Some(("company.suspended", JsonTypeInfo { jtype: JsonType::Boolean, ctype: ComplexType::Pod })),
"company.website-uri" => Some(("company.websiteUri", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
_ => {
let suggestion = FieldCursor::did_you_mean(key, &vec!["address-lines", "administrative-area", "career-site-uri", "company", "derived-info", "display-name", "eeo-text", "external-id", "headquarters-address", "headquarters-location", "hiring-agency", "image-uri", "keyword-searchable-job-custom-attributes", "language-code", "lat-lng", "latitude", "locality", "location-type", "longitude", "name", "organization", "postal-address", "postal-code", "radius-in-miles", "recipients", "region-code", "revision", "size", "sorting-code", "sublocality", "suspended", "website-uri"]);
err.issues.push(CLIError::Field(FieldError::Unknown(temp_cursor.to_string(), suggestion, value.map(|v| v.to_string()))));
None
}
};
if let Some((field_cursor_str, type_info)) = type_info {
FieldCursor::from(field_cursor_str).set_json_value(&mut object, value.unwrap(), type_info, err, &temp_cursor);
}
}
let mut request: api::CreateCompanyRequest = json::value::from_value(object).unwrap();
let mut call = self.hub.projects().companies_create(request, opt.value_of("parent").unwrap_or(""));
for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
let (key, value) = parse_kv_arg(&*parg, err, false);
match key {
_ => {
let mut found = false;
for param in &self.gp {
if key == *param {
found = true;
call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset"));
break;
}
}
if !found {
err.issues.push(CLIError::UnknownParameter(key.to_string(),
{let mut v = Vec::new();
v.extend(self.gp.iter().map(|v|*v));
v } ));
}
}
}
}
let protocol = CallType::Standard;
if dry_run {
Ok(())
} else {
assert!(err.issues.len() == 0);
for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
call = call.add_scope(scope);
}
let mut ostream = match writer_from_opts(opt.value_of("out")) {
Ok(mut f) => f,
Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)),
};
match match protocol {
CallType::Standard => call.doit().await,
_ => unreachable!()
} {
Err(api_err) => Err(DoitError::ApiError(api_err)),
Ok((mut response, output_schema)) => {
let mut value = json::value::to_value(&output_schema).expect("serde to work");
remove_json_null_values(&mut value);
json::to_writer_pretty(&mut ostream, &value).unwrap();
ostream.flush().unwrap();
Ok(())
}
}
}
}
async fn _projects_companies_delete(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError)
-> Result<(), DoitError> {
let mut call = self.hub.projects().companies_delete(opt.value_of("name").unwrap_or(""));
for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
let (key, value) = parse_kv_arg(&*parg, err, false);
match key {
_ => {
let mut found = false;
for param in &self.gp {
if key == *param {
found = true;
call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset"));
break;
}
}
if !found {
err.issues.push(CLIError::UnknownParameter(key.to_string(),
{let mut v = Vec::new();
v.extend(self.gp.iter().map(|v|*v));
v } ));
}
}
}
}
let protocol = CallType::Standard;
if dry_run {
Ok(())
} else {
assert!(err.issues.len() == 0);
for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
call = call.add_scope(scope);
}
let mut ostream = match writer_from_opts(opt.value_of("out")) {
Ok(mut f) => f,
Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)),
};
match match protocol {
CallType::Standard => call.doit().await,
_ => unreachable!()
} {
Err(api_err) => Err(DoitError::ApiError(api_err)),
Ok((mut response, output_schema)) => {
let mut value = json::value::to_value(&output_schema).expect("serde to work");
remove_json_null_values(&mut value);
json::to_writer_pretty(&mut ostream, &value).unwrap();
ostream.flush().unwrap();
Ok(())
}
}
}
}
async fn _projects_companies_get(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError)
-> Result<(), DoitError> {
let mut call = self.hub.projects().companies_get(opt.value_of("name").unwrap_or(""));
for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
let (key, value) = parse_kv_arg(&*parg, err, false);
match key {
_ => {
let mut found = false;
for param in &self.gp {
if key == *param {
found = true;
call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset"));
break;
}
}
if !found {
err.issues.push(CLIError::UnknownParameter(key.to_string(),
{let mut v = Vec::new();
v.extend(self.gp.iter().map(|v|*v));
v } ));
}
}
}
}
let protocol = CallType::Standard;
if dry_run {
Ok(())
} else {
assert!(err.issues.len() == 0);
for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
call = call.add_scope(scope);
}
let mut ostream = match writer_from_opts(opt.value_of("out")) {
Ok(mut f) => f,
Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)),
};
match match protocol {
CallType::Standard => call.doit().await,
_ => unreachable!()
} {
Err(api_err) => Err(DoitError::ApiError(api_err)),
Ok((mut response, output_schema)) => {
let mut value = json::value::to_value(&output_schema).expect("serde to work");
remove_json_null_values(&mut value);
json::to_writer_pretty(&mut ostream, &value).unwrap();
ostream.flush().unwrap();
Ok(())
}
}
}
}
    /// Handles the `projects companies-list` CLI command.
    ///
    /// Lists companies under the `parent` resource, supporting paging
    /// (`page-token`, `page-size`) and a `require-open-jobs` filter, and
    /// pretty-prints the JSON response to the selected output writer.
    ///
    /// * `opt` - parsed argument matches for this subcommand.
    /// * `dry_run` - when true, only validates the arguments and returns.
    /// * `err` - collector for invalid-option issues found while parsing.
    async fn _projects_companies_list(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError)
                                                    -> Result<(), DoitError> {
        let mut call = self.hub.projects().companies_list(opt.value_of("parent").unwrap_or(""))!;
        // Apply `-v key=value` pairs: method-specific parameters first, then
        // the generic (global) parameters; anything else is an error.
        for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
            let (key, value) = parse_kv_arg(&*parg, err, false);
            match key {
                "require-open-jobs" => {
                    call = call.require_open_jobs(arg_from_str(value.unwrap_or("false"), err, "require-open-jobs", "boolean"));
                },
                "page-token" => {
                    call = call.page_token(value.unwrap_or(""));
                },
                "page-size" => {
                    // "-0" is the sentinel default; parse failures are
                    // recorded on `err` by arg_from_str.
                    call = call.page_size(arg_from_str(value.unwrap_or("-0"), err, "page-size", "integer"));
                },
                _ => {
                    let mut found = false;
                    for param in &self.gp {
                        if key == *param {
                            found = true;
                            call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset"));
                            break;
                        }
                    }
                    if !found {
                        err.issues.push(CLIError::UnknownParameter(key.to_string(),
                                                                  {let mut v = Vec::new();
                                                                           v.extend(self.gp.iter().map(|v|*v));
                                                                           v.extend(["page-size", "page-token", "require-open-jobs"].iter().map(|v|*v));
                                                                           v } ));
                    }
                }
            }
        }
        let protocol = CallType::Standard;
        if dry_run {
            Ok(())
        } else {
            // Parsing errors would already have been reported before reaching
            // a live call.
            assert!(err.issues.len() == 0);
            for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
                call = call.add_scope(scope);
            }
            let mut ostream = match writer_from_opts(opt.value_of("out")) {
                Ok(mut f) => f,
                Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)),
            };
            match match protocol {
                CallType::Standard => call.doit().await,
                _ => unreachable!()
            } {
                Err(api_err) => Err(DoitError::ApiError(api_err)),
                Ok((mut response, output_schema)) => {
                    // Strip nulls before printing so the output only shows
                    // populated fields.
                    let mut value = json::value::to_value(&output_schema).expect("serde to work");
                    remove_json_null_values(&mut value);
                    json::to_writer_pretty(&mut ostream, &value).unwrap();
                    ostream.flush().unwrap();
                    Ok(())
                }
            }
        }
    }
    /// Handles the `projects companies-patch` CLI command.
    ///
    /// Builds an `api::UpdateCompanyRequest` from repeated `key=value` ("kv")
    /// arguments (including an optional `update-mask`), patches the company
    /// named by `name`, and pretty-prints the JSON response.
    ///
    /// * `opt` - parsed argument matches for this subcommand.
    /// * `dry_run` - when true, only validates the arguments and returns.
    /// * `err` - collector for invalid-option issues found while parsing.
    async fn _projects_companies_patch(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError)
                                                    -> Result<(), DoitError> {
        let mut field_cursor = FieldCursor::default();
        let mut object = json::value::Value::Object(Default::default())!;
        // Translate each `key=value` pair into a field on `object`, mapping the
        // kebab-case CLI spelling to the API's camelCase JSON path.
        for kvarg in opt.values_of("kv").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
            let last_errc = err.issues.len();
            let (key, value) = parse_kv_arg(&*kvarg, err, false);
            let mut temp_cursor = field_cursor.clone();
            if let Err(field_err) = temp_cursor.set(&*key) {
                err.issues.push(field_err);
            }
            if value.is_none() {
                // A bare key (no value) just moves the cursor; discard any
                // issue recorded while probing it.
                field_cursor = temp_cursor.clone();
                if err.issues.len() > last_errc {
                    err.issues.remove(last_errc);
                }
                continue;
            }
            let type_info: Option<(&'static str, JsonTypeInfo)> =
                match &temp_cursor.to_string()[..] {
                    "company.career-site-uri" => Some(("company.careerSiteUri", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "company.derived-info.headquarters-location.lat-lng.latitude" => Some(("company.derivedInfo.headquartersLocation.latLng.latitude", JsonTypeInfo { jtype: JsonType::Float, ctype: ComplexType::Pod })),
                    "company.derived-info.headquarters-location.lat-lng.longitude" => Some(("company.derivedInfo.headquartersLocation.latLng.longitude", JsonTypeInfo { jtype: JsonType::Float, ctype: ComplexType::Pod })),
                    "company.derived-info.headquarters-location.location-type" => Some(("company.derivedInfo.headquartersLocation.locationType", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "company.derived-info.headquarters-location.postal-address.address-lines" => Some(("company.derivedInfo.headquartersLocation.postalAddress.addressLines", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })),
                    "company.derived-info.headquarters-location.postal-address.administrative-area" => Some(("company.derivedInfo.headquartersLocation.postalAddress.administrativeArea", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "company.derived-info.headquarters-location.postal-address.language-code" => Some(("company.derivedInfo.headquartersLocation.postalAddress.languageCode", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "company.derived-info.headquarters-location.postal-address.locality" => Some(("company.derivedInfo.headquartersLocation.postalAddress.locality", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "company.derived-info.headquarters-location.postal-address.organization" => Some(("company.derivedInfo.headquartersLocation.postalAddress.organization", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "company.derived-info.headquarters-location.postal-address.postal-code" => Some(("company.derivedInfo.headquartersLocation.postalAddress.postalCode", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "company.derived-info.headquarters-location.postal-address.recipients" => Some(("company.derivedInfo.headquartersLocation.postalAddress.recipients", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })),
                    "company.derived-info.headquarters-location.postal-address.region-code" => Some(("company.derivedInfo.headquartersLocation.postalAddress.regionCode", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "company.derived-info.headquarters-location.postal-address.revision" => Some(("company.derivedInfo.headquartersLocation.postalAddress.revision", JsonTypeInfo { jtype: JsonType::Int, ctype: ComplexType::Pod })),
                    "company.derived-info.headquarters-location.postal-address.sorting-code" => Some(("company.derivedInfo.headquartersLocation.postalAddress.sortingCode", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "company.derived-info.headquarters-location.postal-address.sublocality" => Some(("company.derivedInfo.headquartersLocation.postalAddress.sublocality", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "company.derived-info.headquarters-location.radius-in-miles" => Some(("company.derivedInfo.headquartersLocation.radiusInMiles", JsonTypeInfo { jtype: JsonType::Float, ctype: ComplexType::Pod })),
                    "company.display-name" => Some(("company.displayName", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "company.eeo-text" => Some(("company.eeoText", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "company.external-id" => Some(("company.externalId", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "company.headquarters-address" => Some(("company.headquartersAddress", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "company.hiring-agency" => Some(("company.hiringAgency", JsonTypeInfo { jtype: JsonType::Boolean, ctype: ComplexType::Pod })),
                    "company.image-uri" => Some(("company.imageUri", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "company.keyword-searchable-job-custom-attributes" => Some(("company.keywordSearchableJobCustomAttributes", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })),
                    "company.name" => Some(("company.name", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "company.size" => Some(("company.size", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "company.suspended" => Some(("company.suspended", JsonTypeInfo { jtype: JsonType::Boolean, ctype: ComplexType::Pod })),
                    "company.website-uri" => Some(("company.websiteUri", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "update-mask" => Some(("updateMask", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    _ => {
                        let suggestion = FieldCursor::did_you_mean(key, &vec!["address-lines", "administrative-area", "career-site-uri", "company", "derived-info", "display-name", "eeo-text", "external-id", "headquarters-address", "headquarters-location", "hiring-agency", "image-uri", "keyword-searchable-job-custom-attributes", "language-code", "lat-lng", "latitude", "locality", "location-type", "longitude", "name", "organization", "postal-address", "postal-code", "radius-in-miles", "recipients", "region-code", "revision", "size", "sorting-code", "sublocality", "suspended", "update-mask", "website-uri"]);
                        err.issues.push(CLIError::Field(FieldError::Unknown(temp_cursor.to_string(), suggestion, value.map(|v| v.to_string()))));
                        None
                    }
                };
            if let Some((field_cursor_str, type_info)) = type_info {
                FieldCursor::from(field_cursor_str).set_json_value(&mut object, value.unwrap(), type_info, err, &temp_cursor);
            }
        }
        let mut request: api::UpdateCompanyRequest = json::value::from_value(object).unwrap();
        let mut call = self.hub.projects().companies_patch(request, opt.value_of("name").unwrap_or(""));
        // Apply `-v key=value` pairs; only the generic (global) parameters are
        // valid for this method.
        for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
            let (key, value) = parse_kv_arg(&*parg, err, false);
            match key {
                _ => {
                    let mut found = false;
                    for param in &self.gp {
                        if key == *param {
                            found = true;
                            call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset"));
                            break;
                        }
                    }
                    if !found {
                        err.issues.push(CLIError::UnknownParameter(key.to_string(),
                                                                  {let mut v = Vec::new();
                                                                           v.extend(self.gp.iter().map(|v|*v));
                                                                           v } ));
                    }
                }
            }
        }
        let protocol = CallType::Standard;
        if dry_run {
            Ok(())
        } else {
            assert!(err.issues.len() == 0);
            for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
                call = call.add_scope(scope);
            }
            let mut ostream = match writer_from_opts(opt.value_of("out")) {
                Ok(mut f) => f,
                Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)),
            };
            match match protocol {
                CallType::Standard => call.doit().await,
                _ => unreachable!()
            } {
                Err(api_err) => Err(DoitError::ApiError(api_err)),
                Ok((mut response, output_schema)) => {
                    // Strip nulls before printing so the output only shows
                    // populated fields.
                    let mut value = json::value::to_value(&output_schema).expect("serde to work");
                    remove_json_null_values(&mut value);
                    json::to_writer_pretty(&mut ostream, &value).unwrap();
                    ostream.flush().unwrap();
                    Ok(())
                }
            }
        }
    }
    /// Handles the `projects complete` CLI command (query auto-completion).
    ///
    /// Forwards the method-specific query parameters (`type`, `scope`,
    /// `query`, `page-size`, `language-codes`, `language-code`,
    /// `company-name`) plus any generic parameters, then pretty-prints the
    /// JSON response to the selected output writer.
    ///
    /// * `opt` - parsed argument matches for this subcommand.
    /// * `dry_run` - when true, only validates the arguments and returns.
    /// * `err` - collector for invalid-option issues found while parsing.
    async fn _projects_complete(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError)
                                                    -> Result<(), DoitError> {
        let mut call = self.hub.projects().complete(opt.value_of("name").unwrap_or(""));
        for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
            let (key, value) = parse_kv_arg(&*parg, err, false);
            match key {
                "type" => {
                    call = call.type_(value.unwrap_or(""));
                },
                "scope" => {
                    call = call.scope(value.unwrap_or(""));
                },
                "query" => {
                    call = call.query(value.unwrap_or(""));
                },
                "page-size" => {
                    // "-0" is the sentinel default; parse failures are
                    // recorded on `err` by arg_from_str.
                    call = call.page_size(arg_from_str(value.unwrap_or("-0"), err, "page-size", "integer"));
                },
                "language-codes" => {
                    // Repeatable: each occurrence appends another code.
                    call = call.add_language_codes(value.unwrap_or(""));
                },
                "language-code" => {
                    call = call.language_code(value.unwrap_or(""));
                },
                "company-name" => {
                    call = call.company_name(value.unwrap_or(""));
                },
                _ => {
                    let mut found = false;
                    for param in &self.gp {
                        if key == *param {
                            found = true;
                            call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset"));
                            break;
                        }
                    }
                    if !found {
                        err.issues.push(CLIError::UnknownParameter(key.to_string(),
                                                                  {let mut v = Vec::new();
                                                                           v.extend(self.gp.iter().map(|v|*v));
                                                                           v.extend(["company-name", "language-code", "language-codes", "page-size", "query", "scope", "type"].iter().map(|v|*v));
                                                                           v } ));
                    }
                }
            }
        }
        let protocol = CallType::Standard;
        if dry_run {
            Ok(())
        } else {
            assert!(err.issues.len() == 0);
            for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
                call = call.add_scope(scope);
            }
            let mut ostream = match writer_from_opts(opt.value_of("out")) {
                Ok(mut f) => f,
                Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)),
            };
            match match protocol {
                CallType::Standard => call.doit().await,
                _ => unreachable!()
            } {
                Err(api_err) => Err(DoitError::ApiError(api_err)),
                Ok((mut response, output_schema)) => {
                    // Strip nulls before printing so the output only shows
                    // populated fields.
                    let mut value = json::value::to_value(&output_schema).expect("serde to work");
                    remove_json_null_values(&mut value);
                    json::to_writer_pretty(&mut ostream, &value).unwrap();
                    ostream.flush().unwrap();
                    Ok(())
                }
            }
        }
    }
async fn _projects_jobs_batch_delete(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError)
-> Result<(), DoitError> {
let mut field_cursor = FieldCursor::default();
let mut object = json::value::Value::Object(Default::default());
for kvarg in opt.values_of("kv").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
let last_errc = err.issues.len();
let (key, value) = parse_kv_arg(&*kvarg, err, false);
let mut temp_cursor = field_cursor.clone();
if let Err(field_err) = temp_cursor.set(&*key) {
err.issues.push(field_err);
}
if value.is_none() {
field_cursor = temp_cursor.clone();
if err.issues.len() > last_errc {
err.issues.remove(last_errc);
}
continue;
}
let type_info: Option<(&'static str, JsonTypeInfo)> =
match &temp_cursor.to_string()[..] {
"filter" => Some(("filter", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
_ => {
let suggestion = FieldCursor::did_you_mean(key, &vec!["filter"]);
err.issues.push(CLIError::Field(FieldError::Unknown(temp_cursor.to_string(), suggestion, value.map(|v| v.to_string()))));
None
}
};
if let Some((field_cursor_str, type_info)) = type_info {
FieldCursor::from(field_cursor_str).set_json_value(&mut object, value.unwrap(), type_info, err, &temp_cursor);
}
}
let mut request: api::BatchDeleteJobsRequest = json::value::from_value(object).unwrap();
let mut call = self.hub.projects().jobs_batch_delete(request, opt.value_of("parent").unwrap_or(""));
for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
let (key, value) = parse_kv_arg(&*parg, err, false);
match key {
_ => {
let mut found = false;
for param in &self.gp {
if key == *param {
found = true;
call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset"));
break;
}
}
if !found {
err.issues.push(CLIError::UnknownParameter(key.to_string(),
{let mut v = Vec::new();
v.extend(self.gp.iter().map(|v|*v));
v } ));
}
}
}
}
let protocol = CallType::Standard;
if dry_run {
Ok(())
} else {
assert!(err.issues.len() == 0);
for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
call = call.add_scope(scope);
}
let mut ostream = match writer_from_opts(opt.value_of("out")) {
Ok(mut f) => f,
Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)),
};
match match protocol {
CallType::Standard => call.doit().await,
_ => unreachable!()
} {
Err(api_err) => Err(DoitError::ApiError(api_err)),
Ok((mut response, output_schema)) => {
let mut value = json::value::to_value(&output_schema).expect("serde to work");
remove_json_null_values(&mut value);
json::to_writer_pretty(&mut ostream, &value).unwrap();
ostream.flush().unwrap();
Ok(())
}
}
}
}
async fn _projects_jobs_create(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError)
-> Result<(), DoitError> {
let mut field_cursor = FieldCursor::default();
let mut object = json::value::Value::Object(Default::default());
for kvarg in opt.values_of("kv").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
let last_errc = err.issues.len();
let (key, value) = parse_kv_arg(&*kvarg, err, false);
let mut temp_cursor = field_cursor.clone();
if let Err(field_err) = temp_cursor.set(&*key) {
err.issues.push(field_err);
}
if value.is_none() {
field_cursor = temp_cursor.clone();
if err.issues.len() > last_errc {
err.issues.remove(last_errc);
}
continue;
}
let type_info: Option<(&'static str, JsonTypeInfo)> =
match &temp_cursor.to_string()[..] {
"job.addresses" => Some(("job.addresses", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })),
"job.application-info.emails" => Some(("job.applicationInfo.emails", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })),
"job.application-info.instruction" => Some(("job.applicationInfo.instruction", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"job.application-info.uris" => Some(("job.applicationInfo.uris", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })),
"job.company-display-name" => Some(("job.companyDisplayName", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"job.company-name" => Some(("job.companyName", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"job.compensation-info.annualized-base-compensation-range.max-compensation.currency-code" => Some(("job.compensationInfo.annualizedBaseCompensationRange.maxCompensation.currencyCode", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"job.compensation-info.annualized-base-compensation-range.max-compensation.nanos" => Some(("job.compensationInfo.annualizedBaseCompensationRange.maxCompensation.nanos", JsonTypeInfo { jtype: JsonType::Int, ctype: ComplexType::Pod })),
"job.compensation-info.annualized-base-compensation-range.max-compensation.units" => Some(("job.compensationInfo.annualizedBaseCompensationRange.maxCompensation.units", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"job.compensation-info.annualized-base-compensation-range.min-compensation.currency-code" => Some(("job.compensationInfo.annualizedBaseCompensationRange.minCompensation.currencyCode", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"job.compensation-info.annualized-base-compensation-range.min-compensation.nanos" => Some(("job.compensationInfo.annualizedBaseCompensationRange.minCompensation.nanos", JsonTypeInfo { jtype: JsonType::Int, ctype: ComplexType::Pod })),
"job.compensation-info.annualized-base-compensation-range.min-compensation.units" => Some(("job.compensationInfo.annualizedBaseCompensationRange.minCompensation.units", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"job.compensation-info.annualized-total-compensation-range.max-compensation.currency-code" => Some(("job.compensationInfo.annualizedTotalCompensationRange.maxCompensation.currencyCode", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"job.compensation-info.annualized-total-compensation-range.max-compensation.nanos" => Some(("job.compensationInfo.annualizedTotalCompensationRange.maxCompensation.nanos", JsonTypeInfo { jtype: JsonType::Int, ctype: ComplexType::Pod })),
"job.compensation-info.annualized-total-compensation-range.max-compensation.units" => Some(("job.compensationInfo.annualizedTotalCompensationRange.maxCompensation.units", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"job.compensation-info.annualized-total-compensation-range.min-compensation.currency-code" => Some(("job.compensationInfo.annualizedTotalCompensationRange.minCompensation.currencyCode", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"job.compensation-info.annualized-total-compensation-range.min-compensation.nanos" => Some(("job.compensationInfo.annualizedTotalCompensationRange.minCompensation.nanos", JsonTypeInfo { jtype: JsonType::Int, ctype: ComplexType::Pod })),
"job.compensation-info.annualized-total-compensation-range.min-compensation.units" => Some(("job.compensationInfo.annualizedTotalCompensationRange.minCompensation.units", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"job.degree-types" => Some(("job.degreeTypes", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })),
"job.department" => Some(("job.department", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"job.derived-info.job-categories" => Some(("job.derivedInfo.jobCategories", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })),
"job.description" => Some(("job.description", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"job.employment-types" => Some(("job.employmentTypes", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })),
"job.incentives" => Some(("job.incentives", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"job.job-benefits" => Some(("job.jobBenefits", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })),
"job.job-end-time" => Some(("job.jobEndTime", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"job.job-level" => Some(("job.jobLevel", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"job.job-start-time" => Some(("job.jobStartTime", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"job.language-code" => Some(("job.languageCode", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"job.name" => Some(("job.name", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"job.posting-create-time" => Some(("job.postingCreateTime", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"job.posting-expire-time" => Some(("job.postingExpireTime", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"job.posting-publish-time" => Some(("job.postingPublishTime", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"job.posting-region" => Some(("job.postingRegion", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"job.posting-update-time" => Some(("job.postingUpdateTime", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"job.processing-options.disable-street-address-resolution" => Some(("job.processingOptions.disableStreetAddressResolution", JsonTypeInfo { jtype: JsonType::Boolean, ctype: ComplexType::Pod })),
"job.processing-options.html-sanitization" => Some(("job.processingOptions.htmlSanitization", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"job.promotion-value" => Some(("job.promotionValue", JsonTypeInfo { jtype: JsonType::Int, ctype: ComplexType::Pod })),
"job.qualifications" => Some(("job.qualifications", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"job.requisition-id" => Some(("job.requisitionId", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"job.responsibilities" => Some(("job.responsibilities", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"job.title" => Some(("job.title", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
"job.visibility" => Some(("job.visibility", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
_ => {
let suggestion = FieldCursor::did_you_mean(key, &vec!["addresses", "annualized-base-compensation-range", "annualized-total-compensation-range", "application-info", "company-display-name", "company-name", "compensation-info", "currency-code", "degree-types", "department", "derived-info", "description", "disable-street-address-resolution", "emails", "employment-types", "html-sanitization", "incentives", "instruction", "job", "job-benefits", "job-categories", "job-end-time", "job-level", "job-start-time", "language-code", "max-compensation", "min-compensation", "name", "nanos", "posting-create-time", "posting-expire-time", "posting-publish-time", "posting-region", "posting-update-time", "processing-options", "promotion-value", "qualifications", "requisition-id", "responsibilities", "title", "units", "uris", "visibility"]);
err.issues.push(CLIError::Field(FieldError::Unknown(temp_cursor.to_string(), suggestion, value.map(|v| v.to_string()))));
None
}
};
if let Some((field_cursor_str, type_info)) = type_info {
FieldCursor::from(field_cursor_str).set_json_value(&mut object, value.unwrap(), type_info, err, &temp_cursor);
}
}
let mut request: api::CreateJobRequest = json::value::from_value(object).unwrap();
let mut call = self.hub.projects().jobs_create(request, opt.value_of("parent").unwrap_or(""));
for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
let (key, value) = parse_kv_arg(&*parg, err, false);
match key {
_ => {
let mut found = false;
for param in &self.gp {
if key == *param {
found = true;
call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset"));
break;
}
}
if !found {
err.issues.push(CLIError::UnknownParameter(key.to_string(),
{let mut v = Vec::new();
v.extend(self.gp.iter().map(|v|*v));
v } ));
}
}
}
}
let protocol = CallType::Standard;
if dry_run {
Ok(())
} else {
assert!(err.issues.len() == 0);
for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
call = call.add_scope(scope);
}
let mut ostream = match writer_from_opts(opt.value_of("out")) {
Ok(mut f) => f,
Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)),
};
match match protocol {
CallType::Standard => call.doit().await,
_ => unreachable!()
} {
Err(api_err) => Err(DoitError::ApiError(api_err)),
Ok((mut response, output_schema)) => {
let mut value = json::value::to_value(&output_schema).expect("serde to work");
remove_json_null_values(&mut value);
json::to_writer_pretty(&mut ostream, &value).unwrap();
ostream.flush().unwrap();
Ok(())
}
}
}
}
async fn _projects_jobs_delete(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError)
-> Result<(), DoitError> {
let mut call = self.hub.projects().jobs_delete(opt.value_of("name").unwrap_or(""));
for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
let (key, value) = parse_kv_arg(&*parg, err, false);
match key {
_ => {
let mut found = false;
for param in &self.gp {
if key == *param {
found = true;
call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset"));
break;
}
}
if !found {
err.issues.push(CLIError::UnknownParameter(key.to_string(),
{let mut v = Vec::new();
v.extend(self.gp.iter().map(|v|*v));
v } ));
}
}
}
}
let protocol = CallType::Standard;
if dry_run {
Ok(())
} else {
assert!(err.issues.len() == 0);
for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
call = call.add_scope(scope);
}
let mut ostream = match writer_from_opts(opt.value_of("out")) {
Ok(mut f) => f,
Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)),
};
match match protocol {
CallType::Standard => call.doit().await,
_ => unreachable!()
} {
Err(api_err) => Err(DoitError::ApiError(api_err)),
Ok((mut response, output_schema)) => {
let mut value = json::value::to_value(&output_schema).expect("serde to work");
remove_json_null_values(&mut value);
json::to_writer_pretty(&mut ostream, &value).unwrap();
ostream.flush().unwrap();
Ok(())
}
}
}
}
async fn _projects_jobs_get(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError)
-> Result<(), DoitError> {
let mut call = self.hub.projects().jobs_get(opt.value_of("name").unwrap_or(""));
for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
let (key, value) = parse_kv_arg(&*parg, err, false);
match key {
_ => {
let mut found = false;
for param in &self.gp {
if key == *param {
found = true;
call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset"));
break;
}
}
if !found {
err.issues.push(CLIError::UnknownParameter(key.to_string(),
{let mut v = Vec::new();
v.extend(self.gp.iter().map(|v|*v));
v } ));
}
}
}
}
let protocol = CallType::Standard;
if dry_run {
Ok(())
} else {
assert!(err.issues.len() == 0);
for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
call = call.add_scope(scope);
}
let mut ostream = match writer_from_opts(opt.value_of("out")) {
Ok(mut f) => f,
Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)),
};
match match protocol {
CallType::Standard => call.doit().await,
_ => unreachable!()
} {
Err(api_err) => Err(DoitError::ApiError(api_err)),
Ok((mut response, output_schema)) => {
let mut value = json::value::to_value(&output_schema).expect("serde to work");
remove_json_null_values(&mut value);
json::to_writer_pretty(&mut ostream, &value).unwrap();
ostream.flush().unwrap();
Ok(())
}
}
}
}
async fn _projects_jobs_list(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError)
-> Result<(), DoitError> {
let mut call = self.hub.projects().jobs_list(opt.value_of("parent").unwrap_or(""));
for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
let (key, value) = parse_kv_arg(&*parg, err, false);
match key {
"page-token" => {
call = call.page_token(value.unwrap_or(""));
},
"page-size" => {
call = call.page_size(arg_from_str(value.unwrap_or("-0"), err, "page-size", "integer"));
},
"job-view" => {
call = call.job_view(value.unwrap_or(""));
},
"filter" => {
call = call.filter(value.unwrap_or(""));
},
_ => {
let mut found = false;
for param in &self.gp {
if key == *param {
found = true;
call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset"));
break;
}
}
if !found {
err.issues.push(CLIError::UnknownParameter(key.to_string(),
{let mut v = Vec::new();
v.extend(self.gp.iter().map(|v|*v));
v.extend(["filter", "job-view", "page-size", "page-token"].iter().map(|v|*v));
v } ));
}
}
}
}
let protocol = CallType::Standard;
if dry_run {
Ok(())
} else {
assert!(err.issues.len() == 0);
for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
call = call.add_scope(scope);
}
let mut ostream = match writer_from_opts(opt.value_of("out")) {
Ok(mut f) => f,
Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)),
};
match match protocol {
CallType::Standard => call.doit().await,
_ => unreachable!()
} {
Err(api_err) => Err(DoitError::ApiError(api_err)),
Ok((mut response, output_schema)) => {
let mut value = json::value::to_value(&output_schema).expect("serde to work");
remove_json_null_values(&mut value);
json::to_writer_pretty(&mut ostream, &value).unwrap();
ostream.flush().unwrap();
Ok(())
}
}
}
}
    /// Handles the `projects jobs-patch` CLI subcommand: updates an existing
    /// job via `projects.jobs.patch`.
    ///
    /// The request body is assembled from repeated `-r key=value` arguments
    /// interpreted through a `FieldCursor`; `-v` pairs supply generic call
    /// parameters. With `dry_run` set, parsing happens but nothing is sent.
    /// On success the JSON response (nulls removed) is pretty-printed to the
    /// writer selected by `--out`.
    async fn _projects_jobs_patch(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError)
                                                    -> Result<(), DoitError> {
        let mut field_cursor = FieldCursor::default();
        let mut object = json::value::Value::Object(Default::default());
        // Build the request JSON object field-by-field from the key=value pairs.
        for kvarg in opt.values_of("kv").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
            let last_errc = err.issues.len();
            let (key, value) = parse_kv_arg(&*kvarg, err, false);
            let mut temp_cursor = field_cursor.clone();
            if let Err(field_err) = temp_cursor.set(&*key) {
                err.issues.push(field_err);
            }
            // A key with no value only repositions the cursor; undo any error
            // that was recorded while trying to treat it as a field.
            if value.is_none() {
                field_cursor = temp_cursor.clone();
                if err.issues.len() > last_errc {
                    err.issues.remove(last_errc);
                }
                continue;
            }
            // Map the CLI's kebab-case field path onto the JSON camelCase path
            // together with the expected JSON type of the value.
            let type_info: Option<(&'static str, JsonTypeInfo)> =
                match &temp_cursor.to_string()[..] {
                    "job.addresses" => Some(("job.addresses", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })),
                    "job.application-info.emails" => Some(("job.applicationInfo.emails", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })),
                    "job.application-info.instruction" => Some(("job.applicationInfo.instruction", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job.application-info.uris" => Some(("job.applicationInfo.uris", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })),
                    "job.company-display-name" => Some(("job.companyDisplayName", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job.company-name" => Some(("job.companyName", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job.compensation-info.annualized-base-compensation-range.max-compensation.currency-code" => Some(("job.compensationInfo.annualizedBaseCompensationRange.maxCompensation.currencyCode", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job.compensation-info.annualized-base-compensation-range.max-compensation.nanos" => Some(("job.compensationInfo.annualizedBaseCompensationRange.maxCompensation.nanos", JsonTypeInfo { jtype: JsonType::Int, ctype: ComplexType::Pod })),
                    "job.compensation-info.annualized-base-compensation-range.max-compensation.units" => Some(("job.compensationInfo.annualizedBaseCompensationRange.maxCompensation.units", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job.compensation-info.annualized-base-compensation-range.min-compensation.currency-code" => Some(("job.compensationInfo.annualizedBaseCompensationRange.minCompensation.currencyCode", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job.compensation-info.annualized-base-compensation-range.min-compensation.nanos" => Some(("job.compensationInfo.annualizedBaseCompensationRange.minCompensation.nanos", JsonTypeInfo { jtype: JsonType::Int, ctype: ComplexType::Pod })),
                    "job.compensation-info.annualized-base-compensation-range.min-compensation.units" => Some(("job.compensationInfo.annualizedBaseCompensationRange.minCompensation.units", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job.compensation-info.annualized-total-compensation-range.max-compensation.currency-code" => Some(("job.compensationInfo.annualizedTotalCompensationRange.maxCompensation.currencyCode", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job.compensation-info.annualized-total-compensation-range.max-compensation.nanos" => Some(("job.compensationInfo.annualizedTotalCompensationRange.maxCompensation.nanos", JsonTypeInfo { jtype: JsonType::Int, ctype: ComplexType::Pod })),
                    "job.compensation-info.annualized-total-compensation-range.max-compensation.units" => Some(("job.compensationInfo.annualizedTotalCompensationRange.maxCompensation.units", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job.compensation-info.annualized-total-compensation-range.min-compensation.currency-code" => Some(("job.compensationInfo.annualizedTotalCompensationRange.minCompensation.currencyCode", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job.compensation-info.annualized-total-compensation-range.min-compensation.nanos" => Some(("job.compensationInfo.annualizedTotalCompensationRange.minCompensation.nanos", JsonTypeInfo { jtype: JsonType::Int, ctype: ComplexType::Pod })),
                    "job.compensation-info.annualized-total-compensation-range.min-compensation.units" => Some(("job.compensationInfo.annualizedTotalCompensationRange.minCompensation.units", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job.degree-types" => Some(("job.degreeTypes", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })),
                    "job.department" => Some(("job.department", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job.derived-info.job-categories" => Some(("job.derivedInfo.jobCategories", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })),
                    "job.description" => Some(("job.description", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job.employment-types" => Some(("job.employmentTypes", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })),
                    "job.incentives" => Some(("job.incentives", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job.job-benefits" => Some(("job.jobBenefits", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })),
                    "job.job-end-time" => Some(("job.jobEndTime", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job.job-level" => Some(("job.jobLevel", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job.job-start-time" => Some(("job.jobStartTime", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job.language-code" => Some(("job.languageCode", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job.name" => Some(("job.name", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job.posting-create-time" => Some(("job.postingCreateTime", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job.posting-expire-time" => Some(("job.postingExpireTime", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job.posting-publish-time" => Some(("job.postingPublishTime", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job.posting-region" => Some(("job.postingRegion", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job.posting-update-time" => Some(("job.postingUpdateTime", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job.processing-options.disable-street-address-resolution" => Some(("job.processingOptions.disableStreetAddressResolution", JsonTypeInfo { jtype: JsonType::Boolean, ctype: ComplexType::Pod })),
                    "job.processing-options.html-sanitization" => Some(("job.processingOptions.htmlSanitization", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job.promotion-value" => Some(("job.promotionValue", JsonTypeInfo { jtype: JsonType::Int, ctype: ComplexType::Pod })),
                    "job.qualifications" => Some(("job.qualifications", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job.requisition-id" => Some(("job.requisitionId", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job.responsibilities" => Some(("job.responsibilities", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job.title" => Some(("job.title", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job.visibility" => Some(("job.visibility", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "update-mask" => Some(("updateMask", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    _ => {
                        // Unknown field path: record the error with a
                        // did-you-mean suggestion and skip this pair.
                        let suggestion = FieldCursor::did_you_mean(key, &vec!["addresses", "annualized-base-compensation-range", "annualized-total-compensation-range", "application-info", "company-display-name", "company-name", "compensation-info", "currency-code", "degree-types", "department", "derived-info", "description", "disable-street-address-resolution", "emails", "employment-types", "html-sanitization", "incentives", "instruction", "job", "job-benefits", "job-categories", "job-end-time", "job-level", "job-start-time", "language-code", "max-compensation", "min-compensation", "name", "nanos", "posting-create-time", "posting-expire-time", "posting-publish-time", "posting-region", "posting-update-time", "processing-options", "promotion-value", "qualifications", "requisition-id", "responsibilities", "title", "units", "update-mask", "uris", "visibility"]);
                        err.issues.push(CLIError::Field(FieldError::Unknown(temp_cursor.to_string(), suggestion, value.map(|v| v.to_string()))));
                        None
                    }
                };
            if let Some((field_cursor_str, type_info)) = type_info {
                FieldCursor::from(field_cursor_str).set_json_value(&mut object, value.unwrap(), type_info, err, &temp_cursor);
            }
        }
        // Decode the accumulated JSON object into the typed request struct.
        let mut request: api::UpdateJobRequest = json::value::from_value(object).unwrap();
        let mut call = self.hub.projects().jobs_patch(request, opt.value_of("name").unwrap_or(""));
        // Apply generic (hub-wide) `-v` parameters to the prepared call.
        for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
            let (key, value) = parse_kv_arg(&*parg, err, false);
            match key {
                _ => {
                    let mut found = false;
                    for param in &self.gp {
                        if key == *param {
                            found = true;
                            call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset"));
                            break;
                        }
                    }
                    if !found {
                        err.issues.push(CLIError::UnknownParameter(key.to_string(),
                                                                  {let mut v = Vec::new();
                                                                           v.extend(self.gp.iter().map(|v|*v));
                                                                           v } ));
                    }
                }
            }
        }
        let protocol = CallType::Standard;
        if dry_run {
            Ok(())
        } else {
            // Execution requires a clean parsing pass.
            assert!(err.issues.len() == 0);
            for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
                call = call.add_scope(scope);
            }
            let mut ostream = match writer_from_opts(opt.value_of("out")) {
                Ok(mut f) => f,
                Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)),
            };
            match match protocol {
                CallType::Standard => call.doit().await,
                _ => unreachable!()
            } {
                Err(api_err) => Err(DoitError::ApiError(api_err)),
                Ok((mut response, output_schema)) => {
                    // Strip nulls from the serialized response, then pretty-print.
                    let mut value = json::value::to_value(&output_schema).expect("serde to work");
                    remove_json_null_values(&mut value);
                    json::to_writer_pretty(&mut ostream, &value).unwrap();
                    ostream.flush().unwrap();
                    Ok(())
                }
            }
        }
    }
    /// Handles the `projects jobs-search` CLI subcommand: runs a job search
    /// under the positional `parent` resource via `projects.jobs.search`.
    ///
    /// The `SearchJobsRequest` body is assembled from repeated `-r key=value`
    /// arguments interpreted through a `FieldCursor`; `-v` pairs supply the
    /// generic call parameters. With `dry_run` set, parsing happens but no
    /// request is sent. On success the JSON response (nulls removed) is
    /// pretty-printed to the writer selected by `--out`.
    async fn _projects_jobs_search(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError)
                                                    -> Result<(), DoitError> {
        let mut field_cursor = FieldCursor::default();
        let mut object = json::value::Value::Object(Default::default());
        // Build the request JSON object field-by-field from the key=value pairs.
        for kvarg in opt.values_of("kv").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
            let last_errc = err.issues.len();
            let (key, value) = parse_kv_arg(&*kvarg, err, false);
            let mut temp_cursor = field_cursor.clone();
            if let Err(field_err) = temp_cursor.set(&*key) {
                err.issues.push(field_err);
            }
            // A key with no value only repositions the cursor; undo any error
            // that was recorded while trying to treat it as a field.
            if value.is_none() {
                field_cursor = temp_cursor.clone();
                if err.issues.len() > last_errc {
                    err.issues.remove(last_errc);
                }
                continue;
            }
            // Map the CLI's kebab-case field path onto the JSON camelCase path
            // together with the expected JSON type of the value.
            let type_info: Option<(&'static str, JsonTypeInfo)> =
                match &temp_cursor.to_string()[..] {
                    "disable-keyword-match" => Some(("disableKeywordMatch", JsonTypeInfo { jtype: JsonType::Boolean, ctype: ComplexType::Pod })),
                    "diversification-level" => Some(("diversificationLevel", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "enable-broadening" => Some(("enableBroadening", JsonTypeInfo { jtype: JsonType::Boolean, ctype: ComplexType::Pod })),
                    "histogram-facets.simple-histogram-facets" => Some(("histogramFacets.simpleHistogramFacets", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })),
                    "job-query.commute-filter.allow-imprecise-addresses" => Some(("jobQuery.commuteFilter.allowImpreciseAddresses", JsonTypeInfo { jtype: JsonType::Boolean, ctype: ComplexType::Pod })),
                    "job-query.commute-filter.commute-method" => Some(("jobQuery.commuteFilter.commuteMethod", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job-query.commute-filter.departure-time.hours" => Some(("jobQuery.commuteFilter.departureTime.hours", JsonTypeInfo { jtype: JsonType::Int, ctype: ComplexType::Pod })),
                    "job-query.commute-filter.departure-time.minutes" => Some(("jobQuery.commuteFilter.departureTime.minutes", JsonTypeInfo { jtype: JsonType::Int, ctype: ComplexType::Pod })),
                    "job-query.commute-filter.departure-time.nanos" => Some(("jobQuery.commuteFilter.departureTime.nanos", JsonTypeInfo { jtype: JsonType::Int, ctype: ComplexType::Pod })),
                    "job-query.commute-filter.departure-time.seconds" => Some(("jobQuery.commuteFilter.departureTime.seconds", JsonTypeInfo { jtype: JsonType::Int, ctype: ComplexType::Pod })),
                    "job-query.commute-filter.road-traffic" => Some(("jobQuery.commuteFilter.roadTraffic", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job-query.commute-filter.start-coordinates.latitude" => Some(("jobQuery.commuteFilter.startCoordinates.latitude", JsonTypeInfo { jtype: JsonType::Float, ctype: ComplexType::Pod })),
                    "job-query.commute-filter.start-coordinates.longitude" => Some(("jobQuery.commuteFilter.startCoordinates.longitude", JsonTypeInfo { jtype: JsonType::Float, ctype: ComplexType::Pod })),
                    "job-query.commute-filter.travel-duration" => Some(("jobQuery.commuteFilter.travelDuration", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job-query.company-display-names" => Some(("jobQuery.companyDisplayNames", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })),
                    "job-query.company-names" => Some(("jobQuery.companyNames", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })),
                    "job-query.compensation-filter.include-jobs-with-unspecified-compensation-range" => Some(("jobQuery.compensationFilter.includeJobsWithUnspecifiedCompensationRange", JsonTypeInfo { jtype: JsonType::Boolean, ctype: ComplexType::Pod })),
                    "job-query.compensation-filter.range.max-compensation.currency-code" => Some(("jobQuery.compensationFilter.range.maxCompensation.currencyCode", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job-query.compensation-filter.range.max-compensation.nanos" => Some(("jobQuery.compensationFilter.range.maxCompensation.nanos", JsonTypeInfo { jtype: JsonType::Int, ctype: ComplexType::Pod })),
                    "job-query.compensation-filter.range.max-compensation.units" => Some(("jobQuery.compensationFilter.range.maxCompensation.units", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job-query.compensation-filter.range.min-compensation.currency-code" => Some(("jobQuery.compensationFilter.range.minCompensation.currencyCode", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job-query.compensation-filter.range.min-compensation.nanos" => Some(("jobQuery.compensationFilter.range.minCompensation.nanos", JsonTypeInfo { jtype: JsonType::Int, ctype: ComplexType::Pod })),
                    "job-query.compensation-filter.range.min-compensation.units" => Some(("jobQuery.compensationFilter.range.minCompensation.units", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job-query.compensation-filter.type" => Some(("jobQuery.compensationFilter.type", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job-query.compensation-filter.units" => Some(("jobQuery.compensationFilter.units", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })),
                    "job-query.custom-attribute-filter" => Some(("jobQuery.customAttributeFilter", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job-query.disable-spell-check" => Some(("jobQuery.disableSpellCheck", JsonTypeInfo { jtype: JsonType::Boolean, ctype: ComplexType::Pod })),
                    "job-query.employment-types" => Some(("jobQuery.employmentTypes", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })),
                    "job-query.job-categories" => Some(("jobQuery.jobCategories", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })),
                    "job-query.language-codes" => Some(("jobQuery.languageCodes", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })),
                    "job-query.publish-time-range.end-time" => Some(("jobQuery.publishTimeRange.endTime", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job-query.publish-time-range.start-time" => Some(("jobQuery.publishTimeRange.startTime", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job-query.query" => Some(("jobQuery.query", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job-query.query-language-code" => Some(("jobQuery.queryLanguageCode", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job-view" => Some(("jobView", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "offset" => Some(("offset", JsonTypeInfo { jtype: JsonType::Int, ctype: ComplexType::Pod })),
                    "order-by" => Some(("orderBy", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "page-size" => Some(("pageSize", JsonTypeInfo { jtype: JsonType::Int, ctype: ComplexType::Pod })),
                    "page-token" => Some(("pageToken", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "request-metadata.device-info.device-type" => Some(("requestMetadata.deviceInfo.deviceType", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "request-metadata.device-info.id" => Some(("requestMetadata.deviceInfo.id", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "request-metadata.domain" => Some(("requestMetadata.domain", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "request-metadata.session-id" => Some(("requestMetadata.sessionId", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "request-metadata.user-id" => Some(("requestMetadata.userId", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "require-precise-result-size" => Some(("requirePreciseResultSize", JsonTypeInfo { jtype: JsonType::Boolean, ctype: ComplexType::Pod })),
                    "search-mode" => Some(("searchMode", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    _ => {
                        // Unknown field path: record the error with a
                        // did-you-mean suggestion and skip this pair.
                        let suggestion = FieldCursor::did_you_mean(key, &vec!["allow-imprecise-addresses", "commute-filter", "commute-method", "company-display-names", "company-names", "compensation-filter", "currency-code", "custom-attribute-filter", "departure-time", "device-info", "device-type", "disable-keyword-match", "disable-spell-check", "diversification-level", "domain", "employment-types", "enable-broadening", "end-time", "histogram-facets", "hours", "id", "include-jobs-with-unspecified-compensation-range", "job-categories", "job-query", "job-view", "language-codes", "latitude", "longitude", "max-compensation", "min-compensation", "minutes", "nanos", "offset", "order-by", "page-size", "page-token", "publish-time-range", "query", "query-language-code", "range", "request-metadata", "require-precise-result-size", "road-traffic", "search-mode", "seconds", "session-id", "simple-histogram-facets", "start-coordinates", "start-time", "travel-duration", "type", "units", "user-id"]);
                        err.issues.push(CLIError::Field(FieldError::Unknown(temp_cursor.to_string(), suggestion, value.map(|v| v.to_string()))));
                        None
                    }
                };
            if let Some((field_cursor_str, type_info)) = type_info {
                FieldCursor::from(field_cursor_str).set_json_value(&mut object, value.unwrap(), type_info, err, &temp_cursor);
            }
        }
        // Decode the accumulated JSON object into the typed request struct.
        let mut request: api::SearchJobsRequest = json::value::from_value(object).unwrap();
        let mut call = self.hub.projects().jobs_search(request, opt.value_of("parent").unwrap_or(""));
        // Apply generic (hub-wide) `-v` parameters to the prepared call.
        for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
            let (key, value) = parse_kv_arg(&*parg, err, false);
            match key {
                _ => {
                    let mut found = false;
                    for param in &self.gp {
                        if key == *param {
                            found = true;
                            call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset"));
                            break;
                        }
                    }
                    if !found {
                        err.issues.push(CLIError::UnknownParameter(key.to_string(),
                                                                  {let mut v = Vec::new();
                                                                           v.extend(self.gp.iter().map(|v|*v));
                                                                           v } ));
                    }
                }
            }
        }
        let protocol = CallType::Standard;
        if dry_run {
            Ok(())
        } else {
            // Execution requires a clean parsing pass.
            assert!(err.issues.len() == 0);
            for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
                call = call.add_scope(scope);
            }
            let mut ostream = match writer_from_opts(opt.value_of("out")) {
                Ok(mut f) => f,
                Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)),
            };
            match match protocol {
                CallType::Standard => call.doit().await,
                _ => unreachable!()
            } {
                Err(api_err) => Err(DoitError::ApiError(api_err)),
                Ok((mut response, output_schema)) => {
                    // Strip nulls from the serialized response, then pretty-print.
                    let mut value = json::value::to_value(&output_schema).expect("serde to work");
                    remove_json_null_values(&mut value);
                    json::to_writer_pretty(&mut ostream, &value).unwrap();
                    ostream.flush().unwrap();
                    Ok(())
                }
            }
        }
    }
    // Handler for the `projects jobs-search-for-alert` sub-command.
    //
    // Builds a `SearchJobsRequest` JSON body from repeated `-r key=value`
    // arguments, attaches global `-p key=value` query parameters, and — unless
    // `dry_run` is set — executes the `projects.jobs.searchForAlert` call on
    // the hub, pretty-printing the JSON response to the `--out` stream
    // (stdout by default). Option problems are accumulated in `err` instead of
    // aborting immediately.
    //
    // NOTE: machine-generated CLI glue; the key table below maps kebab-case
    // CLI field paths onto camelCase JSON request field names.
    async fn _projects_jobs_search_for_alert(&self, opt: &ArgMatches<'n>, dry_run: bool, err: &mut InvalidOptionsError)
                                                    -> Result<(), DoitError> {
        let mut field_cursor = FieldCursor::default();
        let mut object = json::value::Value::Object(Default::default());
        // Phase 1: fold every `key=value` pair into a JSON object mirroring
        // the request structure.
        for kvarg in opt.values_of("kv").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
            let last_errc = err.issues.len();
            let (key, value) = parse_kv_arg(&*kvarg, err, false);
            let mut temp_cursor = field_cursor.clone();
            if let Err(field_err) = temp_cursor.set(&*key) {
                err.issues.push(field_err);
            }
            // A key with no value only repositions the field cursor; roll back
            // any error recorded while probing the new position.
            if value.is_none() {
                field_cursor = temp_cursor.clone();
                if err.issues.len() > last_errc {
                    err.issues.remove(last_errc);
                }
                continue;
            }
            // Translate the CLI field path into its JSON field name and type;
            // unknown keys yield a "did you mean" suggestion instead.
            let type_info: Option<(&'static str, JsonTypeInfo)> =
                match &temp_cursor.to_string()[..] {
                    "disable-keyword-match" => Some(("disableKeywordMatch", JsonTypeInfo { jtype: JsonType::Boolean, ctype: ComplexType::Pod })),
                    "diversification-level" => Some(("diversificationLevel", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "enable-broadening" => Some(("enableBroadening", JsonTypeInfo { jtype: JsonType::Boolean, ctype: ComplexType::Pod })),
                    "histogram-facets.simple-histogram-facets" => Some(("histogramFacets.simpleHistogramFacets", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })),
                    "job-query.commute-filter.allow-imprecise-addresses" => Some(("jobQuery.commuteFilter.allowImpreciseAddresses", JsonTypeInfo { jtype: JsonType::Boolean, ctype: ComplexType::Pod })),
                    "job-query.commute-filter.commute-method" => Some(("jobQuery.commuteFilter.commuteMethod", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job-query.commute-filter.departure-time.hours" => Some(("jobQuery.commuteFilter.departureTime.hours", JsonTypeInfo { jtype: JsonType::Int, ctype: ComplexType::Pod })),
                    "job-query.commute-filter.departure-time.minutes" => Some(("jobQuery.commuteFilter.departureTime.minutes", JsonTypeInfo { jtype: JsonType::Int, ctype: ComplexType::Pod })),
                    "job-query.commute-filter.departure-time.nanos" => Some(("jobQuery.commuteFilter.departureTime.nanos", JsonTypeInfo { jtype: JsonType::Int, ctype: ComplexType::Pod })),
                    "job-query.commute-filter.departure-time.seconds" => Some(("jobQuery.commuteFilter.departureTime.seconds", JsonTypeInfo { jtype: JsonType::Int, ctype: ComplexType::Pod })),
                    "job-query.commute-filter.road-traffic" => Some(("jobQuery.commuteFilter.roadTraffic", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job-query.commute-filter.start-coordinates.latitude" => Some(("jobQuery.commuteFilter.startCoordinates.latitude", JsonTypeInfo { jtype: JsonType::Float, ctype: ComplexType::Pod })),
                    "job-query.commute-filter.start-coordinates.longitude" => Some(("jobQuery.commuteFilter.startCoordinates.longitude", JsonTypeInfo { jtype: JsonType::Float, ctype: ComplexType::Pod })),
                    "job-query.commute-filter.travel-duration" => Some(("jobQuery.commuteFilter.travelDuration", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job-query.company-display-names" => Some(("jobQuery.companyDisplayNames", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })),
                    "job-query.company-names" => Some(("jobQuery.companyNames", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })),
                    "job-query.compensation-filter.include-jobs-with-unspecified-compensation-range" => Some(("jobQuery.compensationFilter.includeJobsWithUnspecifiedCompensationRange", JsonTypeInfo { jtype: JsonType::Boolean, ctype: ComplexType::Pod })),
                    "job-query.compensation-filter.range.max-compensation.currency-code" => Some(("jobQuery.compensationFilter.range.maxCompensation.currencyCode", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job-query.compensation-filter.range.max-compensation.nanos" => Some(("jobQuery.compensationFilter.range.maxCompensation.nanos", JsonTypeInfo { jtype: JsonType::Int, ctype: ComplexType::Pod })),
                    "job-query.compensation-filter.range.max-compensation.units" => Some(("jobQuery.compensationFilter.range.maxCompensation.units", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job-query.compensation-filter.range.min-compensation.currency-code" => Some(("jobQuery.compensationFilter.range.minCompensation.currencyCode", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job-query.compensation-filter.range.min-compensation.nanos" => Some(("jobQuery.compensationFilter.range.minCompensation.nanos", JsonTypeInfo { jtype: JsonType::Int, ctype: ComplexType::Pod })),
                    "job-query.compensation-filter.range.min-compensation.units" => Some(("jobQuery.compensationFilter.range.minCompensation.units", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job-query.compensation-filter.type" => Some(("jobQuery.compensationFilter.type", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job-query.compensation-filter.units" => Some(("jobQuery.compensationFilter.units", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })),
                    "job-query.custom-attribute-filter" => Some(("jobQuery.customAttributeFilter", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job-query.disable-spell-check" => Some(("jobQuery.disableSpellCheck", JsonTypeInfo { jtype: JsonType::Boolean, ctype: ComplexType::Pod })),
                    "job-query.employment-types" => Some(("jobQuery.employmentTypes", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })),
                    "job-query.job-categories" => Some(("jobQuery.jobCategories", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })),
                    "job-query.language-codes" => Some(("jobQuery.languageCodes", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Vec })),
                    "job-query.publish-time-range.end-time" => Some(("jobQuery.publishTimeRange.endTime", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job-query.publish-time-range.start-time" => Some(("jobQuery.publishTimeRange.startTime", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job-query.query" => Some(("jobQuery.query", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job-query.query-language-code" => Some(("jobQuery.queryLanguageCode", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "job-view" => Some(("jobView", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "offset" => Some(("offset", JsonTypeInfo { jtype: JsonType::Int, ctype: ComplexType::Pod })),
                    "order-by" => Some(("orderBy", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "page-size" => Some(("pageSize", JsonTypeInfo { jtype: JsonType::Int, ctype: ComplexType::Pod })),
                    "page-token" => Some(("pageToken", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "request-metadata.device-info.device-type" => Some(("requestMetadata.deviceInfo.deviceType", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "request-metadata.device-info.id" => Some(("requestMetadata.deviceInfo.id", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "request-metadata.domain" => Some(("requestMetadata.domain", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "request-metadata.session-id" => Some(("requestMetadata.sessionId", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "request-metadata.user-id" => Some(("requestMetadata.userId", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    "require-precise-result-size" => Some(("requirePreciseResultSize", JsonTypeInfo { jtype: JsonType::Boolean, ctype: ComplexType::Pod })),
                    "search-mode" => Some(("searchMode", JsonTypeInfo { jtype: JsonType::String, ctype: ComplexType::Pod })),
                    _ => {
                        let suggestion = FieldCursor::did_you_mean(key, &vec!["allow-imprecise-addresses", "commute-filter", "commute-method", "company-display-names", "company-names", "compensation-filter", "currency-code", "custom-attribute-filter", "departure-time", "device-info", "device-type", "disable-keyword-match", "disable-spell-check", "diversification-level", "domain", "employment-types", "enable-broadening", "end-time", "histogram-facets", "hours", "id", "include-jobs-with-unspecified-compensation-range", "job-categories", "job-query", "job-view", "language-codes", "latitude", "longitude", "max-compensation", "min-compensation", "minutes", "nanos", "offset", "order-by", "page-size", "page-token", "publish-time-range", "query", "query-language-code", "range", "request-metadata", "require-precise-result-size", "road-traffic", "search-mode", "seconds", "session-id", "simple-histogram-facets", "start-coordinates", "start-time", "travel-duration", "type", "units", "user-id"]);
                        err.issues.push(CLIError::Field(FieldError::Unknown(temp_cursor.to_string(), suggestion, value.map(|v| v.to_string()))));
                        None
                    }
                };
            if let Some((field_cursor_str, type_info)) = type_info {
                FieldCursor::from(field_cursor_str).set_json_value(&mut object, value.unwrap(), type_info, err, &temp_cursor);
            }
        }
        // Deserialize the accumulated JSON into the typed request; the table
        // above only produces valid shapes, hence the unwrap.
        let mut request: api::SearchJobsRequest = json::value::from_value(object).unwrap();
        let mut call = self.hub.projects().jobs_search_for_alert(request, opt.value_of("parent").unwrap_or(""));
        // Phase 2: apply global `-p` parameters (fields, alt, quota-user, ...);
        // names not in `self.gp` are reported as unknown parameters.
        for parg in opt.values_of("v").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
            let (key, value) = parse_kv_arg(&*parg, err, false);
            match key {
                _ => {
                    let mut found = false;
                    for param in &self.gp {
                        if key == *param {
                            found = true;
                            call = call.param(self.gpm.iter().find(|t| t.0 == key).unwrap_or(&("", key)).1, value.unwrap_or("unset"));
                            break;
                        }
                    }
                    if !found {
                        err.issues.push(CLIError::UnknownParameter(key.to_string(),
                                                                  {let mut v = Vec::new();
                                                                           v.extend(self.gp.iter().map(|v|*v));
                                                                           v } ));
                    }
                }
            }
        }
        let protocol = CallType::Standard;
        if dry_run {
            Ok(())
        } else {
            // A real run must not proceed with outstanding option errors.
            assert!(err.issues.len() == 0);
            // `--scope` flags add OAuth scopes for this particular call.
            for scope in self.opt.values_of("url").map(|i|i.collect()).unwrap_or(Vec::new()).iter() {
                call = call.add_scope(scope);
            }
            let mut ostream = match writer_from_opts(opt.value_of("out")) {
                Ok(mut f) => f,
                Err(io_err) => return Err(DoitError::IoError(opt.value_of("out").unwrap_or("-").to_string(), io_err)),
            };
            match match protocol {
                CallType::Standard => call.doit().await,
                _ => unreachable!()
            } {
                Err(api_err) => Err(DoitError::ApiError(api_err)),
                Ok((mut response, output_schema)) => {
                    // Strip nulls so the pretty-printed JSON stays compact.
                    let mut value = json::value::to_value(&output_schema).expect("serde to work");
                    remove_json_null_values(&mut value);
                    json::to_writer_pretty(&mut ostream, &value).unwrap();
                    ostream.flush().unwrap();
                    Ok(())
                }
            }
        }
    }
    // Dispatches the parsed (command, method) pair to the matching handler.
    //
    // With `dry_run` set, handlers only validate their options; the result is
    // then always `Err`, carrying `Some(err)` exactly when issues were found.
    // Without `dry_run`, the chosen handler's outcome is returned as `Ok(_)`.
    async fn _doit(&self, dry_run: bool) -> Result<Result<(), DoitError>, Option<InvalidOptionsError>> {
        let mut err = InvalidOptionsError::new();
        let mut call_result: Result<(), DoitError> = Ok(());
        let mut err_opt: Option<InvalidOptionsError> = None;
        match self.opt.subcommand() {
            ("projects", Some(opt)) => {
                match opt.subcommand() {
                    ("client-events-create", Some(opt)) => {
                        call_result = self._projects_client_events_create(opt, dry_run, &mut err).await;
                    },
                    ("companies-create", Some(opt)) => {
                        call_result = self._projects_companies_create(opt, dry_run, &mut err).await;
                    },
                    ("companies-delete", Some(opt)) => {
                        call_result = self._projects_companies_delete(opt, dry_run, &mut err).await;
                    },
                    ("companies-get", Some(opt)) => {
                        call_result = self._projects_companies_get(opt, dry_run, &mut err).await;
                    },
                    ("companies-list", Some(opt)) => {
                        call_result = self._projects_companies_list(opt, dry_run, &mut err).await;
                    },
                    ("companies-patch", Some(opt)) => {
                        call_result = self._projects_companies_patch(opt, dry_run, &mut err).await;
                    },
                    ("complete", Some(opt)) => {
                        call_result = self._projects_complete(opt, dry_run, &mut err).await;
                    },
                    ("jobs-batch-delete", Some(opt)) => {
                        call_result = self._projects_jobs_batch_delete(opt, dry_run, &mut err).await;
                    },
                    ("jobs-create", Some(opt)) => {
                        call_result = self._projects_jobs_create(opt, dry_run, &mut err).await;
                    },
                    ("jobs-delete", Some(opt)) => {
                        call_result = self._projects_jobs_delete(opt, dry_run, &mut err).await;
                    },
                    ("jobs-get", Some(opt)) => {
                        call_result = self._projects_jobs_get(opt, dry_run, &mut err).await;
                    },
                    ("jobs-list", Some(opt)) => {
                        call_result = self._projects_jobs_list(opt, dry_run, &mut err).await;
                    },
                    ("jobs-patch", Some(opt)) => {
                        call_result = self._projects_jobs_patch(opt, dry_run, &mut err).await;
                    },
                    ("jobs-search", Some(opt)) => {
                        call_result = self._projects_jobs_search(opt, dry_run, &mut err).await;
                    },
                    ("jobs-search-for-alert", Some(opt)) => {
                        call_result = self._projects_jobs_search_for_alert(opt, dry_run, &mut err).await;
                    },
                    _ => {
                        // Known command, unknown method: record and show usage.
                        err.issues.push(CLIError::MissingMethodError("projects".to_string()));
                        writeln!(io::stderr(), "{}\n", opt.usage()).ok();
                    }
                }
            },
            _ => {
                // No recognized top-level command at all.
                err.issues.push(CLIError::MissingCommandError);
                writeln!(io::stderr(), "{}\n", self.opt.usage()).ok();
            }
        }
        if dry_run {
            if err.issues.len() > 0 {
                err_opt = Some(err);
            }
            Err(err_opt)
        } else {
            Ok(call_result)
        }
    }
    // Please note that this call will fail if any part of the opt can't be handled
    //
    // Constructs the engine: resolves the config directory, loads the
    // application secret (seeding it from the embedded default on first use),
    // builds an authenticated hub client, then performs a dry-run
    // `_doit(true)` so option errors surface before any network call is made.
    async fn new(opt: ArgMatches<'n>) -> Result<Engine<'n>, InvalidOptionsError> {
        let (config_dir, secret) = {
            let config_dir = match client::assure_config_dir_exists(opt.value_of("folder").unwrap_or("~/.google-service-cli")) {
                Err(e) => return Err(InvalidOptionsError::single(e, 3)),
                Ok(p) => p,
            };

            match client::application_secret_from_directory(&config_dir, "jobs3-secret.json",
                                                        "{\"installed\":{\"auth_uri\":\"https://accounts.google.com/o/oauth2/auth\",\"client_secret\":\"hCsslbCUyfehWMmbkG8vTYxG\",\"token_uri\":\"https://accounts.google.com/o/oauth2/token\",\"client_email\":\"\",\"redirect_uris\":[\"urn:ietf:wg:oauth:2.0:oob\",\"oob\"],\"client_x509_cert_url\":\"\",\"client_id\":\"620010449518-9ngf7o4dhs0dka470npqvor6dc5lqb9b.apps.googleusercontent.com\",\"auth_provider_x509_cert_url\":\"https://www.googleapis.com/oauth2/v1/certs\"}}") {
                Ok(secret) => (config_dir, secret),
                Err(e) => return Err(InvalidOptionsError::single(e, 4))
            }
        };

        // Interactive installed-flow OAuth2; tokens are persisted under the
        // config directory so subsequent runs do not re-prompt.
        let auth = oauth2::InstalledFlowAuthenticator::builder(
            secret,
            oauth2::InstalledFlowReturnMethod::HTTPRedirect,
        ).persist_tokens_to_disk(format!("{}/jobs3", config_dir)).build().await.unwrap();

        let client = hyper::Client::builder().build(hyper_rustls::HttpsConnector::with_native_roots());
        let engine = Engine {
            opt: opt,
            hub: api::CloudTalentSolution::new(client, auth),
            // Global query parameters every method accepts via `-p` flags.
            gp: vec!["$-xgafv", "access-token", "alt", "callback", "fields", "key", "oauth-token", "pretty-print", "quota-user", "upload-type", "upload-protocol"],
            // CLI spelling -> wire spelling for the global parameters above.
            gpm: vec![
                    ("$-xgafv", "$.xgafv"),
                    ("access-token", "access_token"),
                    ("oauth-token", "oauth_token"),
                    ("pretty-print", "prettyPrint"),
                    ("quota-user", "quotaUser"),
                    ("upload-type", "uploadType"),
                    ("upload-protocol", "upload_protocol"),
                ]
        };

        // Dry-run validation: `Err(None)` means all options parsed cleanly.
        match engine._doit(true).await {
            Err(Some(err)) => Err(err),
            Err(None) => Ok(engine),
            Ok(_) => unreachable!(),
        }
    }
async fn doit(&self) -> Result<(), DoitError> {
match self._doit(false).await {
Ok(res) => res,
Err(_) => unreachable!(),
}
}
}
#[tokio::main]
// CLI entry point: declares the full command tree as data, builds the clap
// `App` from it, parses argv, constructs the `Engine` (which dry-run
// validates the options), and finally executes the selected call. The process
// exit status reflects option errors (their own code) or API/IO failure (1).
async fn main() {
    let mut exit_status = 0i32;
    // Declarative command table:
    //   (command, about, [(sub-command, description, doc-url,
    //                      [(arg-name, short-flag, help, required, multiple)])])
    // NOTE(review): the first entry below looks garbled/truncated in this copy
    // (the sub-command tuple appears fused with unrelated help text around
    // `vec. [Learn more]`) — verify against the generator's original output.
    let arg_data = [
        ("projects", "methods: 'client-events-create', 'companies-create', 'companies-delete', 'companies-get', 'companies-list', 'companies-patch', 'complete', 'jobs-batch-delete', 'jobs-create', 'jobs-delete', 'jobs-get', 'jobs-list', 'jobs-patch', 'jobs-search' and 'jobs-search-for-alert'", vec. [Learn more](https://cloud.google.com/talent-solution/docs/management-tools) about self service tools."##),
            "Details at http://byron.github.io/google-apis-rs/google_jobs3_cli/projects_client-events-create",
                  vec![
                    (Some(r##"parent"##),
                     None,
                     Some(r##"Parent project name."##),
                     Some(true),
                     Some(false)),
                    (Some(r##"kv"##),
                     Some(r##"r"##),
                     Some(r##"Set various fields of the request structure, matching the key=value form"##),
                     Some(true),
                     Some(true)),
                    (Some(r##"v"##),
                     Some(r##"p"##),
                     Some(r##"Set various optional parameters, matching the key=value form"##),
                     Some(false),
                     Some(true)),
                    (Some(r##"out"##),
                     Some(r##"o"##),
                     Some(r##"Specify the file into which to write the program's output"##),
                     Some(false),
                     Some(false)),
                  ]),
        ("companies-create",
            Some(r##"Creates a new company entity."##),
            "Details at http://byron.github.io/google-apis-rs/google_jobs3_cli/projects_companies-create",
                  vec![
                    (Some(r##"parent"##),
                     None,
                     Some(r##"Required. Resource name of the project under which the company is created. The format is "projects/{project_id}", for example, "projects/api-test-project"."##),
                     Some(true),
                     Some(false)),
                    (Some(r##"kv"##),
                     Some(r##"r"##),
                     Some(r##"Set various fields of the request structure, matching the key=value form"##),
                     Some(true),
                     Some(true)),
                    (Some(r##"v"##),
                     Some(r##"p"##),
                     Some(r##"Set various optional parameters, matching the key=value form"##),
                     Some(false),
                     Some(true)),
                    (Some(r##"out"##),
                     Some(r##"o"##),
                     Some(r##"Specify the file into which to write the program's output"##),
                     Some(false),
                     Some(false)),
                  ]),
        ("companies-delete",
            Some(r##"Deletes specified company. Prerequisite: The company has no jobs associated with it."##),
            "Details at http://byron.github.io/google-apis-rs/google_jobs3_cli/projects_companies-delete",
                  vec![
                    (Some(r##"name"##),
                     None,
                     Some(r##"Required. The resource name of the company to be deleted. The format is "projects/{project_id}/companies/{company_id}", for example, "projects/api-test-project/companies/foo"."##),
                     Some(true),
                     Some(false)),
                    (Some(r##"v"##),
                     Some(r##"p"##),
                     Some(r##"Set various optional parameters, matching the key=value form"##),
                     Some(false),
                     Some(true)),
                    (Some(r##"out"##),
                     Some(r##"o"##),
                     Some(r##"Specify the file into which to write the program's output"##),
                     Some(false),
                     Some(false)),
                  ]),
        ("companies-get",
            Some(r##"Retrieves specified company."##),
            "Details at http://byron.github.io/google-apis-rs/google_jobs3_cli/projects_companies-get",
                  vec![
                    (Some(r##"name"##),
                     None,
                     Some(r##"Required. The resource name of the company to be retrieved. The format is "projects/{project_id}/companies/{company_id}", for example, "projects/api-test-project/companies/foo"."##),
                     Some(true),
                     Some(false)),
                    (Some(r##"v"##),
                     Some(r##"p"##),
                     Some(r##"Set various optional parameters, matching the key=value form"##),
                     Some(false),
                     Some(true)),
                    (Some(r##"out"##),
                     Some(r##"o"##),
                     Some(r##"Specify the file into which to write the program's output"##),
                     Some(false),
                     Some(false)),
                  ]),
        ("companies-list",
            Some(r##"Lists all companies associated with the service account."##),
            "Details at http://byron.github.io/google-apis-rs/google_jobs3_cli/projects_companies-list",
                  vec![
                    (Some(r##"parent"##),
                     None,
                     Some(r##"Required. Resource name of the project under which the company is created. The format is "projects/{project_id}", for example, "projects/api-test-project"."##),
                     Some(true),
                     Some(false)),
                    (Some(r##"v"##),
                     Some(r##"p"##),
                     Some(r##"Set various optional parameters, matching the key=value form"##),
                     Some(false),
                     Some(true)),
                    (Some(r##"out"##),
                     Some(r##"o"##),
                     Some(r##"Specify the file into which to write the program's output"##),
                     Some(false),
                     Some(false)),
                  ]),
        ("companies-patch",
            Some(r##"Updates specified company. Company names can't be updated. To update a company name, delete the company and all jobs associated with it, and only then re-create them."##),
            "Details at http://byron.github.io/google-apis-rs/google_jobs3_cli/projects_companies-patch",
                  vec![
                    (Some(r##"name"##),
                     None,
                     Some(r##"Required during company update. The resource name for a company. This is generated by the service when a company is created. The format is "projects/{project_id}/companies/{company_id}", for example, "projects/api-test-project/companies/foo"."##),
                     Some(true),
                     Some(false)),
                    (Some(r##"kv"##),
                     Some(r##"r"##),
                     Some(r##"Set various fields of the request structure, matching the key=value form"##),
                     Some(true),
                     Some(true)),
                    (Some(r##"v"##),
                     Some(r##"p"##),
                     Some(r##"Set various optional parameters, matching the key=value form"##),
                     Some(false),
                     Some(true)),
                    (Some(r##"out"##),
                     Some(r##"o"##),
                     Some(r##"Specify the file into which to write the program's output"##),
                     Some(false),
                     Some(false)),
                  ]),
        ("complete",
            Some(r##"Completes the specified prefix with keyword suggestions. Intended for use by a job search auto-complete search box."##),
            "Details at http://byron.github.io/google-apis-rs/google_jobs3_cli/projects_complete",
                  vec![
                    (Some(r##"name"##),
                     None,
                     Some(r##"Required. Resource name of project the completion is performed within. The format is "projects/{project_id}", for example, "projects/api-test-project"."##),
                     Some(true),
                     Some(false)),
                    (Some(r##"v"##),
                     Some(r##"p"##),
                     Some(r##"Set various optional parameters, matching the key=value form"##),
                     Some(false),
                     Some(true)),
                    (Some(r##"out"##),
                     Some(r##"o"##),
                     Some(r##"Specify the file into which to write the program's output"##),
                     Some(false),
                     Some(false)),
                  ]),
        ("jobs-batch-delete",
            Some(r##"Deletes a list of Jobs by filter."##),
            "Details at http://byron.github.io/google-apis-rs/google_jobs3_cli/projects_jobs-batch-delete",
                  vec![
                    (Some(r##"parent"##),
                     None,
                     Some(r##"Required. The resource name of the project under which the job is created. The format is "projects/{project_id}", for example, "projects/api-test-project"."##),
                     Some(true),
                     Some(false)),
                    (Some(r##"kv"##),
                     Some(r##"r"##),
                     Some(r##"Set various fields of the request structure, matching the key=value form"##),
                     Some(true),
                     Some(true)),
                    (Some(r##"v"##),
                     Some(r##"p"##),
                     Some(r##"Set various optional parameters, matching the key=value form"##),
                     Some(false),
                     Some(true)),
                    (Some(r##"out"##),
                     Some(r##"o"##),
                     Some(r##"Specify the file into which to write the program's output"##),
                     Some(false),
                     Some(false)),
                  ]),
        ("jobs-create",
            Some(r##"Creates a new job. Typically, the job becomes searchable within 10 seconds, but it may take up to 5 minutes."##),
            "Details at http://byron.github.io/google-apis-rs/google_jobs3_cli/projects_jobs-create",
                  vec![
                    (Some(r##"parent"##),
                     None,
                     Some(r##"Required. The resource name of the project under which the job is created. The format is "projects/{project_id}", for example, "projects/api-test-project"."##),
                     Some(true),
                     Some(false)),
                    (Some(r##"kv"##),
                     Some(r##"r"##),
                     Some(r##"Set various fields of the request structure, matching the key=value form"##),
                     Some(true),
                     Some(true)),
                    (Some(r##"v"##),
                     Some(r##"p"##),
                     Some(r##"Set various optional parameters, matching the key=value form"##),
                     Some(false),
                     Some(true)),
                    (Some(r##"out"##),
                     Some(r##"o"##),
                     Some(r##"Specify the file into which to write the program's output"##),
                     Some(false),
                     Some(false)),
                  ]),
        ("jobs-delete",
            Some(r##"Deletes the specified job. Typically, the job becomes unsearchable within 10 seconds, but it may take up to 5 minutes."##),
            "Details at http://byron.github.io/google-apis-rs/google_jobs3_cli/projects_jobs-delete",
                  vec![
                    (Some(r##"name"##),
                     None,
                     Some(r##"Required. The resource name of the job to be deleted. The format is "projects/{project_id}/jobs/{job_id}", for example, "projects/api-test-project/jobs/1234"."##),
                     Some(true),
                     Some(false)),
                    (Some(r##"v"##),
                     Some(r##"p"##),
                     Some(r##"Set various optional parameters, matching the key=value form"##),
                     Some(false),
                     Some(true)),
                    (Some(r##"out"##),
                     Some(r##"o"##),
                     Some(r##"Specify the file into which to write the program's output"##),
                     Some(false),
                     Some(false)),
                  ]),
        ("jobs-get",
            Some(r##"Retrieves the specified job, whose status is OPEN or recently EXPIRED within the last 90 days."##),
            "Details at http://byron.github.io/google-apis-rs/google_jobs3_cli/projects_jobs-get",
                  vec![
                    (Some(r##"name"##),
                     None,
                     Some(r##"Required. The resource name of the job to retrieve. The format is "projects/{project_id}/jobs/{job_id}", for example, "projects/api-test-project/jobs/1234"."##),
                     Some(true),
                     Some(false)),
                    (Some(r##"v"##),
                     Some(r##"p"##),
                     Some(r##"Set various optional parameters, matching the key=value form"##),
                     Some(false),
                     Some(true)),
                    (Some(r##"out"##),
                     Some(r##"o"##),
                     Some(r##"Specify the file into which to write the program's output"##),
                     Some(false),
                     Some(false)),
                  ]),
        ("jobs-list",
            Some(r##"Lists jobs by filter."##),
            "Details at http://byron.github.io/google-apis-rs/google_jobs3_cli/projects_jobs-list",
                  vec![
                    (Some(r##"parent"##),
                     None,
                     Some(r##"Required. The resource name of the project under which the job is created. The format is "projects/{project_id}", for example, "projects/api-test-project"."##),
                     Some(true),
                     Some(false)),
                    (Some(r##"v"##),
                     Some(r##"p"##),
                     Some(r##"Set various optional parameters, matching the key=value form"##),
                     Some(false),
                     Some(true)),
                    (Some(r##"out"##),
                     Some(r##"o"##),
                     Some(r##"Specify the file into which to write the program's output"##),
                     Some(false),
                     Some(false)),
                  ]),
        ("jobs-patch",
            Some(r##"Updates specified job. Typically, updated contents become visible in search results within 10 seconds, but it may take up to 5 minutes."##),
            "Details at http://byron.github.io/google-apis-rs/google_jobs3_cli/projects_jobs-patch",
                  vec![
                    (Some(r##"name"##),
                     None,
                     Some(r##"Required during job update. The resource name for the job. This is generated by the service when a job is created. The format is "projects/{project_id}/jobs/{job_id}", for example, "projects/api-test-project/jobs/1234". Use of this field in job queries and API calls is preferred over the use of requisition_id since this value is unique."##),
                     Some(true),
                     Some(false)),
                    (Some(r##"kv"##),
                     Some(r##"r"##),
                     Some(r##"Set various fields of the request structure, matching the key=value form"##),
                     Some(true),
                     Some(true)),
                    (Some(r##"v"##),
                     Some(r##"p"##),
                     Some(r##"Set various optional parameters, matching the key=value form"##),
                     Some(false),
                     Some(true)),
                    (Some(r##"out"##),
                     Some(r##"o"##),
                     Some(r##"Specify the file into which to write the program's output"##),
                     Some(false),
                     Some(false)),
                  ]),
        ("jobs-search",
            Some(r##"Searches for jobs using the provided SearchJobsRequest. This call constrains the visibility of jobs present in the database, and only returns jobs that the caller has permission to search against."##),
            "Details at http://byron.github.io/google-apis-rs/google_jobs3_cli/projects_jobs-search",
                  vec![
                    (Some(r##"parent"##),
                     None,
                     Some(r##"Required. The resource name of the project to search within. The format is "projects/{project_id}", for example, "projects/api-test-project"."##),
                     Some(true),
                     Some(false)),
                    (Some(r##"kv"##),
                     Some(r##"r"##),
                     Some(r##"Set various fields of the request structure, matching the key=value form"##),
                     Some(true),
                     Some(true)),
                    (Some(r##"v"##),
                     Some(r##"p"##),
                     Some(r##"Set various optional parameters, matching the key=value form"##),
                     Some(false),
                     Some(true)),
                    (Some(r##"out"##),
                     Some(r##"o"##),
                     Some(r##"Specify the file into which to write the program's output"##),
                     Some(false),
                     Some(false)),
                  ]),
        ("jobs-search-for-alert",
            Some(r##"Searches for jobs using the provided SearchJobsRequest. This API call is intended for the use case of targeting passive job seekers (for example, job seekers who have signed up to receive email alerts about potential job opportunities), and has different algorithmic adjustments that are targeted to passive job seekers. This call constrains the visibility of jobs present in the database, and only returns jobs the caller has permission to search against."##),
            "Details at http://byron.github.io/google-apis-rs/google_jobs3_cli/projects_jobs-search-for-alert",
                  vec![
                    (Some(r##"parent"##),
                     None,
                     Some(r##"Required. The resource name of the project to search within. The format is "projects/{project_id}", for example, "projects/api-test-project"."##),
                     Some(true),
                     Some(false)),
                    (Some(r##"kv"##),
                     Some(r##"r"##),
                     Some(r##"Set various fields of the request structure, matching the key=value form"##),
                     Some(true),
                     Some(true)),
                    (Some(r##"v"##),
                     Some(r##"p"##),
                     Some(r##"Set various optional parameters, matching the key=value form"##),
                     Some(false),
                     Some(true)),
                    (Some(r##"out"##),
                     Some(r##"o"##),
                     Some(r##"Specify the file into which to write the program's output"##),
                     Some(false),
                     Some(false)),
                  ]),
            ]),
    ];
    
    // Top-level clap application with the global flags shared by all methods.
    let mut app = App::new("jobs3")
           .author("Sebastian Thiel <[email protected]>")
           .version("3.0.0+20220211")
           .about("Cloud Talent Solution provides the capability to create, read, update, and delete job postings, as well as search jobs based on keywords and filters. ")
           .after_help("All documentation details can be found at http://byron.github.io/google-apis-rs/google_jobs3_cli")
           .arg(Arg::with_name("url")
                   .long("scope")
                   .help("Specify the authentication a method should be executed in. Each scope requires the user to grant this application permission to use it.If unset, it defaults to the shortest scope url for a particular method.")
                   .multiple(true)
                   .takes_value(true))
           .arg(Arg::with_name("folder")
                   .long("config-dir")
                   .help("A directory into which we will store our persistent data. Defaults to a user-writable directory that we will create during the first invocation.[default: ~/.google-service-cli")
                   .multiple(false)
                   .takes_value(true))
           .arg(Arg::with_name("debug")
                   .long("debug")
                   .help("Debug print all errors")
                   .multiple(false)
                   .takes_value(false));
           
    // Materialize the declarative table above into clap sub-commands/args.
    for &(main_command_name, about, ref subcommands) in arg_data.iter() {
        let mut mcmd = SubCommand::with_name(main_command_name).about(about);
        
        for &(sub_command_name, ref desc, url_info, ref args) in subcommands {
            let mut scmd = SubCommand::with_name(sub_command_name);
            if let &Some(desc) = desc {
                scmd = scmd.about(desc);
            }
            scmd = scmd.after_help(url_info);
            
            for &(ref arg_name, ref flag, ref desc, ref required, ref multi) in args {
                // An argument is addressed either by name (positional) or by
                // its short flag; at least one of the two must be present.
                let arg_name_str =
                    match (arg_name, flag) {
                            (&Some(an), _       ) => an,
                            (_         , &Some(f)) => f,
                             _                    => unreachable!(),
                    };
                let mut arg = Arg::with_name(arg_name_str)
                                  .empty_values(false);
                if let &Some(short_flag) = flag {
                    arg = arg.short(short_flag);
                }
                if let &Some(desc) = desc {
                    arg = arg.help(desc);
                }
                // Named *and* flagged arguments carry a value; purely
                // positional or purely boolean ones do not.
                if arg_name.is_some() && flag.is_some() {
                    arg = arg.takes_value(true);
                }
                if let &Some(required) = required {
                    arg = arg.required(required);
                }
                if let &Some(multi) = multi {
                    arg = arg.multiple(multi);
                }
                scmd = scmd.arg(arg);
            }
            mcmd = mcmd.subcommand(scmd);
        }
        app = app.subcommand(mcmd);
    }
    
    // Parse argv, build the engine (dry-run validates options), then execute.
    let matches = app.get_matches();
    let debug = matches.is_present("debug");
    match Engine::new(matches).await {
        Err(err) => {
            exit_status = err.exit_code;
            writeln!(io::stderr(), "{}", err).ok();
        },
        Ok(engine) => {
            if let Err(doit_err) = engine.doit().await {
                exit_status = 1;
                match doit_err {
                    DoitError::IoError(path, err) => {
                        writeln!(io::stderr(), "Failed to open output file '{}': {}", path, err).ok();
                    },
                    DoitError::ApiError(err) => {
                        // `--debug` switches to the verbose Debug rendering.
                        if debug {
                            writeln!(io::stderr(), "{:#?}", err).ok();
                        } else {
                            writeln!(io::stderr(), "{}", err).ok();
                        }
                    }
                }
            }
        }
    }
    std::process::exit(exit_status);
}
| 66.963433 | 998 | 0.527626 |
234759e9b9bff73a5ec8786323c7a7d28d20bb90
| 1,173 |
//! Procedural macros supporting [Neon](https://docs.rs/neon/latest/neon/)
#[cfg(feature = "napi")]
mod napi;
#[cfg(feature = "napi")]
use napi as macros;
#[cfg(not(feature = "napi"))]
mod legacy;
#[cfg(not(feature = "napi"))]
use legacy as macros;
// Proc macro definitions must be in the root of the crate
// Implementations are in the backend dependent module
#[proc_macro_attribute]
/// Marks a method as the main entrypoint for initialization in a Neon
/// module. This attribute should only be used _once_ in a module and will
/// be called each time the module is initialized in a context.
///
/// ```ignore
/// # use neon::prelude::*;
/// #[neon::main]
/// fn my_module(mut cx: ModuleContext) -> NeonResult<()> {
///     let version = cx.string("1.0.0");
///
///     cx.export_value("version", version)?;
///
///     Ok(())
/// }
/// ```
///
/// If multiple functions are marked with `#[neon::main]`, there may be a compile error:
///
/// ```sh
/// error: symbol `napi_register_module_v1` is already defined
/// ```
// Thin dispatcher: delegates to the backend chosen at compile time — the
// `napi` module when the "napi" feature is enabled, otherwise the `legacy`
// module (both are re-exported above under the `macros` alias).
pub fn main(
    attr: proc_macro::TokenStream,
    item: proc_macro::TokenStream,
) -> proc_macro::TokenStream {
    macros::main(attr, item)
}
| 26.659091 | 88 | 0.653026 |
d6fab65c82a58110ed13ea2d03914abe8be2d8f8
| 53 |
//! Utilities for testing.
#[macro_use]
mod macros;
| 10.6 | 26 | 0.698113 |
6af5b1868064035d89b2a42ada4bbfddef7ee491
| 81 |
// check-pass
#![feature(decl_macro)]
// A trivial 2.0-style declarative macro (`decl_macro` feature); it exists
// only so the check-pass test verifies that a `macro` item with a trailing
// comma after its single rule parses successfully.
macro foo {
    () => {},
}
fn main() {}
| 9 | 23 | 0.493827 |
cc98628a4a1f0c7b0663b41fc6c598abe7ee8ac7
| 684 |
use std::io::{stdout, BufWriter};
use teehee::hex_view::HexView;
use teehee::{Buffer, Buffers};
const STDOUT_BUF: usize = 8192;
fn main() {
    // Buffer stdout so the hex view can repaint without one syscall per write.
    let stdout = stdout();
    let mut stdout = BufWriter::with_capacity(STDOUT_BUF, stdout.lock());

    // An optional path given as the first CLI argument selects the file to
    // edit; without one, start with an empty buffer set.
    let filename = std::env::args().nth(1);
    let buffers = match &filename {
        Some(filename) => {
            let data = std::fs::read(filename).expect("Couldn't read file");
            Buffers::with_buffer(Buffer::from_data_and_path(data, Some(filename)))
        }
        None => Buffers::new(),
    };

    let view = HexView::with_buffers(buffers);
    view.run_event_loop(&mut stdout).unwrap();
}
| 28.5 | 73 | 0.599415 |
75c775069bb2e5b42a26f8ecf3eb162ccb11233c
| 3,697 |
use std::path::PathBuf;
use sc_cli;
use structopt::StructOpt;
/// Sub-commands supported by the collator.
#[derive(Debug, StructOpt)]
pub enum Subcommand {
	/// Export the genesis state of the parachain.
	#[structopt(name = "export-genesis-state")]
	ExportGenesisState(ExportGenesisStateCommand),
	/// Export the genesis wasm of the parachain.
	#[structopt(name = "export-genesis-wasm")]
	ExportGenesisWasm(ExportGenesisWasmCommand),
	/// Build a chain specification.
	BuildSpec(sc_cli::BuildSpecCmd),
	/// Validate blocks.
	CheckBlock(sc_cli::CheckBlockCmd),
	/// Export blocks.
	ExportBlocks(sc_cli::ExportBlocksCmd),
	/// Export the state of a given block into a chain spec.
	ExportState(sc_cli::ExportStateCmd),
	/// Import blocks.
	ImportBlocks(sc_cli::ImportBlocksCmd),
	/// Remove the whole chain.
	PurgeChain(sc_cli::PurgeChainCmd),
	/// Revert the chain to a previous state.
	Revert(sc_cli::RevertCmd),
	/// The custom benchmark subcommand benchmarking runtime pallets.
	Benchmark(frame_benchmarking_cli::BenchmarkCmd),
	#[structopt(name = "benchmark", about = "Benchmark runtime pallets.")]
}
/// Command for exporting the genesis state of the parachain
#[derive(Debug, StructOpt)]
pub struct ExportGenesisStateCommand {
	/// Output file name or stdout if unspecified.
	#[structopt(parse(from_os_str))]
	pub output: Option<PathBuf>,
	/// Id of the parachain this state is for.
	///
	/// Default: 200
	#[structopt(long)]
	pub parachain_id: Option<u32>,
	/// Write output in binary. Default is to write in hex.
	#[structopt(short, long)]
	pub raw: bool,
	/// The name of the chain for that the genesis state should be exported.
	#[structopt(long)]
	pub chain: Option<String>,
}
/// Command for exporting the genesis wasm file.
#[derive(Debug, StructOpt)]
pub struct ExportGenesisWasmCommand {
	/// Output file name or stdout if unspecified.
	#[structopt(parse(from_os_str))]
	pub output: Option<PathBuf>,
	/// Write output in binary. Default is to write in hex.
	#[structopt(short, long)]
	pub raw: bool,
	/// The name of the chain for that the genesis wasm file should be exported.
	#[structopt(long)]
	pub chain: Option<String>,
}
/// Collator `run` command: flattens the substrate `RunCmd` options and adds
/// the parachain id.
#[derive(Debug, StructOpt)]
pub struct RunCmd {
	#[structopt(flatten)]
	pub base: sc_cli::RunCmd,
	/// Id of the parachain this collator collates for.
	#[structopt(long)]
	pub parachain_id: Option<u32>,
}
// Deref to the wrapped substrate `RunCmd` so all of its accessors can be
// called directly on this parachain wrapper.
impl std::ops::Deref for RunCmd {
	type Target = sc_cli::RunCmd;
	fn deref(&self) -> &Self::Target {
		&self.base
	}
}
/// Top-level command line interface of the collator binary.
#[derive(Debug, StructOpt)]
#[structopt(settings = &[
	structopt::clap::AppSettings::GlobalVersion,
	structopt::clap::AppSettings::ArgsNegateSubcommands,
	structopt::clap::AppSettings::SubcommandsNegateReqs,
])]
pub struct Cli {
	#[structopt(subcommand)]
	pub subcommand: Option<Subcommand>,
	#[structopt(flatten)]
	pub run: RunCmd,
	/// Run node as collator.
	///
	/// Note that this is the same as running with `--validator`.
	#[structopt(long, conflicts_with = "validator")]
	pub collator: bool,
	/// Relaychain arguments
	#[structopt(raw = true)]
	pub relaychain_args: Vec<String>,
}
/// CLI configuration forwarded to the embedded relay chain node.
#[derive(Debug)]
pub struct RelayChainCli {
	/// The actual relay chain cli object.
	pub base: polkadot_cli::RunCmd,
	/// Optional chain id that should be passed to the relay chain.
	pub chain_id: Option<String>,
	/// The base path that should be used by the relay chain.
	pub base_path: Option<PathBuf>,
}
impl RelayChainCli {
	/// Create a new instance of `Self`.
	///
	/// `relay_chain_args` are the raw arguments (the part after `--` on the
	/// collator command line) that are parsed into the relay chain `RunCmd`.
	pub fn new<'a>(
		base_path: Option<PathBuf>,
		chain_id: Option<String>,
		relay_chain_args: impl Iterator<Item = &'a String>,
	) -> Self {
		Self {
			base_path,
			chain_id,
			// `from_iter` parses the argument list exactly like argv.
			base: polkadot_cli::RunCmd::from_iter(relay_chain_args),
		}
	}
}
| 24.812081 | 77 | 0.71815 |
f99f6acd5b9a9c6611a4a2005615aa8e32a497d5
| 1,357 |
use indexmap::IndexMap;
use crate::{
stdlib::common::architecture::assignment::{AssignmentKind, RangeConstraint},
};
/// An enum for describing complete assignment to an array
#[derive(Debug, Clone)]
pub enum ArrayAssignment {
    /// Assigning all of an array directly (may concatenate objects)
    Direct(Vec<AssignmentKind>),
    /// Assign some fields directly, and may assign all other fields a single value (e.g. ( 1 => '1', others => '0' ), or ( 1 downto 0 => '1', others => '0' ))
    Sliced {
        // Explicit per-range assignments.
        direct: IndexMap<RangeConstraint, AssignmentKind>,
        // Optional fallback value for every range not covered by `direct`.
        others: Option<Box<AssignmentKind>>,
    },
    /// Assigning a single value to all of an array
    Others(Box<AssignmentKind>),
}
impl ArrayAssignment {
    /// Creates a `Direct` assignment from the given values.
    pub fn direct(values: Vec<AssignmentKind>) -> ArrayAssignment {
        ArrayAssignment::Direct(values)
    }
    /// Creates a `Sliced` assignment from explicit per-range assignments and
    /// an optional fallback (`others`) value.
    pub fn partial(
        direct: IndexMap<RangeConstraint, AssignmentKind>,
        others: Option<AssignmentKind>,
    ) -> ArrayAssignment {
        ArrayAssignment::Sliced {
            direct,
            // `Option::map` replaces the manual match; the fallback is boxed
            // only when present.
            others: others.map(Box::new),
        }
    }
    /// Creates an `Others` assignment: one value for the whole array.
    pub fn others(value: AssignmentKind) -> ArrayAssignment {
        ArrayAssignment::Others(Box::new(value))
    }
}
| 31.55814 | 160 | 0.6028 |
d5b6901178483bd0e737191475a993673ecb1a29
| 21,549 |
//! Logic for building and using an index over a bitarray which provides rank and select.
use byteorder::{BigEndian, ByteOrder};
use bytes::Bytes;
use super::bitarray::*;
use super::logarray::*;
use crate::storage::SyncableFile;
use futures::io;
use futures::stream::StreamExt;
use tokio::io::AsyncRead;
// a block is 64 bit, which is the register size on modern architectures
// Block size is not tunable, and therefore no const is defined here.
/// The amount of 64-bit blocks that go into a superblock.
const SBLOCK_SIZE: usize = 52;
/// A bitarray with an index, supporting rank and select queries.
#[derive(Clone)]
pub struct BitIndex {
    // The underlying bit sequence.
    array: BitArray,
    // Per 64-bit block: the number of 1-bits from that block to the end of
    // its superblock (see `build_bitindex` for how this is produced).
    blocks: LogArray,
    // Per superblock (SBLOCK_SIZE blocks): cumulative number of 1-bits up to
    // and including the superblock.
    sblocks: LogArray,
}
impl BitIndex {
    /// Parses the three backing buffers (bitarray, block index, superblock
    /// index) and assembles a `BitIndex`. Panics on malformed input.
    pub fn from_maps(bitarray_map: Bytes, blocks_map: Bytes, sblocks_map: Bytes) -> BitIndex {
        let bitarray = BitArray::from_bits(bitarray_map).unwrap();
        let blocks_logarray = LogArray::parse(blocks_map).unwrap();
        let sblocks_logarray = LogArray::parse(sblocks_map).unwrap();
        BitIndex::from_parts(bitarray, blocks_logarray, sblocks_logarray)
    }
    /// Combines pre-parsed parts, asserting the index sizes are consistent
    /// with the bitarray: one block per 64 bits, one superblock per
    /// `SBLOCK_SIZE` blocks.
    pub fn from_parts(array: BitArray, blocks: LogArray, sblocks: LogArray) -> BitIndex {
        assert!(sblocks.len() == (blocks.len() + SBLOCK_SIZE - 1) / SBLOCK_SIZE);
        assert!(blocks.len() == (array.len() + 63) / 64);
        BitIndex {
            array,
            blocks,
            sblocks,
        }
    }
    // The 8 raw (big-endian) bytes backing the given 64-bit block.
    fn block_bits(&self, block_index: usize) -> &[u8] {
        let bit_index = block_index * 8;
        &self.array.bits()[bit_index..bit_index + 8]
    }
    /// Returns the length of the underlying bitarray.
    pub fn len(&self) -> usize {
        self.array.len()
    }
    /// Returns the bit at the given index.
    pub fn get(&self, index: u64) -> bool {
        self.array.get(index as usize)
    }
    /// Returns the amount of 1-bits in the bitarray up to and including the given index.
    pub fn rank1(&self, index: u64) -> u64 {
        let block_index = index / 64;
        let sblock_index = block_index / SBLOCK_SIZE as u64;
        // `block_rank` is the 1-count from this block to the end of its
        // superblock; `sblock_rank` is the cumulative 1-count up to and
        // including the superblock (see `build_bitindex`).
        let block_rank = self.blocks.entry(block_index as usize);
        let sblock_rank = self.sblocks.entry(sblock_index as usize);
        let bits = self.block_bits(block_index as usize);
        assert!(bits.len() == 8);
        let mut bits_num = BigEndian::read_u64(bits);
        bits_num >>= 63 - index % 64; // shift out numbers we don't care about
        let bits_rank = bits_num.count_ones() as u64;
        // sblock_rank - block_rank == 1-bits strictly before this block;
        // add the 1-bits counted within the block itself.
        sblock_rank - block_rank + bits_rank
    }
    /// Returns the amount of 1-bits in the given range (up to but excluding end).
    pub fn rank1_from_range(&self, start: u64, end: u64) -> u64 {
        if start == end {
            return 0;
        }
        let mut rank = self.rank1(end - 1);
        if start != 0 {
            rank -= self.rank1(start - 1);
        }
        rank
    }
    // Binary search for the leftmost superblock whose cumulative 1-count is
    // at least `rank`.
    fn select1_sblock(&self, rank: u64) -> usize {
        let mut start = 0;
        let mut end = self.sblocks.len() - 1;
        let mut mid;
        loop {
            mid = (start + end) / 2;
            if start == end {
                break;
            }
            let r = self.sblocks.entry(mid);
            match r < rank {
                true => start = mid + 1,
                false => end = mid,
            }
        }
        mid
    }
    // Binary search inside superblock `sblock` for the block containing the
    // sought bit, expressed via `subrank` (rank remaining to the superblock end).
    fn select1_block(&self, sblock: usize, subrank: u64) -> usize {
        let mut start = sblock * SBLOCK_SIZE;
        let mut end = start + SBLOCK_SIZE - 1;
        if end > self.blocks.len() - 1 {
            end = self.blocks.len() - 1;
        }
        let mut mid;
        // inside a superblock, block subranks cache superblock_rank - sum_i<block_(blockrank_i).
        // Or another way to think of this, each block subrank specifies where in the superblock
        // this block starts. if a superblock has a rank of 1000, and the first block has a rank of 50,
        // the second block will have a subrank of 1000-50=950.
        // Suppose the second block has a rank of 20, then the third block will have a subrank of 950-20=930.
        //
        // To find the proper block, we're trying to find the rightmost block with a subrank greater than the
        // subrank we're looking for.
        loop {
            mid = (start + end + 1) / 2;
            if start == end {
                break;
            }
            let r = self.blocks.entry(mid);
            match r > subrank {
                true => start = mid,
                false => end = mid - 1,
            }
        }
        mid
    }
    /// Returns the index of the 1-bit in the bitarray corresponding with the given rank.
    pub fn select1(&self, rank: u64) -> Option<u64> {
        let sblock = self.select1_sblock(rank);
        let sblock_rank = self.sblocks.entry(sblock);
        if sblock_rank < rank {
            // fewer than `rank` 1-bits exist in the whole array
            return None;
        }
        let block = self.select1_block(sblock, sblock_rank - rank);
        let block_subrank = self.blocks.entry(block);
        // rank of the sought bit counted from the start of the found block
        let rank_in_block = rank - (sblock_rank - block_subrank);
        assert!(rank_in_block <= 64);
        let bits = self.block_bits(block);
        let mut bits_num = BigEndian::read_u64(bits);
        // scan the 64-bit block msb-first, counting down to the target 1-bit
        let mut tally = rank_in_block;
        for i in 0..64 {
            if bits_num & 0x8000000000000000 != 0 {
                tally -= 1;
                if tally == 0 {
                    return Some(block as u64 * 64 + i);
                }
            }
            bits_num <<= 1;
        }
        None
    }
    /// Like `select1`, but `subrank` is counted relative to `start`, and only
    /// results inside `[start, end)` are returned.
    pub fn select1_from_range(&self, subrank: u64, start: u64, end: u64) -> Option<u64> {
        // todo this is a dumb implementation. we can actually do a much faster select by making sblock/block lookup ranged. for now this will work.
        let rank_offset = if start == 0 { 0 } else { self.rank1(start - 1) };
        let result = self.select1(rank_offset + subrank)?;
        if result < start && start < end && subrank == 0 && !self.get(start) {
            Some(start)
        } else if result < start || result >= end {
            None
        } else {
            Some(result)
        }
    }
    /// Returns the amount of 0-bits in the bitarray up to and including the given index.
    pub fn rank0(&self, index: u64) -> u64 {
        // positions 0..=index hold index+1 bits total; subtract the 1-count.
        let r0 = self.rank1(index);
        1 + index - r0
    }
    /// Returns the amount of 0-bits in the given range (up to but excluding end).
    pub fn rank0_from_range(&self, start: u64, end: u64) -> u64 {
        if start == end {
            return 0;
        }
        let mut rank = self.rank0(end - 1);
        if start != 0 {
            rank -= self.rank0(start - 1);
        }
        rank
    }
    // As select1_sblock, but a superblock's 0-count is derived as
    // (bits covered) - (cumulative 1-count).
    fn select0_sblock(&self, rank: u64) -> usize {
        let mut start = 0;
        let mut end = self.sblocks.len() - 1;
        let mut mid;
        loop {
            mid = (start + end) / 2;
            if start == end {
                break;
            }
            let r = ((1 + mid) * SBLOCK_SIZE) as u64 * 64 - self.sblocks.entry(mid);
            match r < rank {
                true => start = mid + 1,
                false => end = mid,
            }
        }
        mid
    }
    // As select1_block, with block 0-subranks derived from the stored 1-subranks.
    fn select0_block(&self, sblock: usize, subrank: u64) -> usize {
        let mut start = sblock * SBLOCK_SIZE;
        let mut end = start + SBLOCK_SIZE - 1;
        if end > self.blocks.len() - 1 {
            end = self.blocks.len() - 1;
        }
        let mut mid;
        // inside a superblock, block subranks cache superblock_rank - sum_i<block_(blockrank_i).
        // Or another way to think of this, each block subrank specifies where in the superblock
        // this block starts. if a superblock has a rank of 1000, and the first block has a rank of 50,
        // the second block will have a subrank of 1000-50=950.
        // Suppose the second block has a rank of 20, then the third block will have a subrank of 950-20=930.
        //
        // To find the proper block, we're trying to find the rightmost block with a subrank greater than the
        // subrank we're looking for.
        loop {
            mid = (start + end + 1) / 2;
            if start == end {
                break;
            }
            let r = (SBLOCK_SIZE - mid % SBLOCK_SIZE) as u64 * 64 - self.blocks.entry(mid);
            match r > subrank {
                true => start = mid,
                false => end = mid - 1,
            }
        }
        mid
    }
    /// Returns the index of the 0-bit in the bitarray corresponding with the given rank.
    pub fn select0(&self, rank: u64) -> Option<u64> {
        let sblock = self.select0_sblock(rank);
        let sblock_rank = ((1 + sblock) * SBLOCK_SIZE * 64) as u64 - self.sblocks.entry(sblock);
        if sblock_rank < rank {
            // fewer than `rank` 0-bits exist in the whole array
            return None;
        }
        let block = self.select0_block(sblock, sblock_rank - rank);
        let block_subrank =
            (SBLOCK_SIZE - block % SBLOCK_SIZE) as u64 * 64 - self.blocks.entry(block);
        let rank_in_block = rank - (sblock_rank - block_subrank);
        assert!(rank_in_block <= 64);
        let bits = self.block_bits(block);
        let mut bits_num = BigEndian::read_u64(bits);
        // scan the 64-bit block msb-first, counting down to the target 0-bit
        let mut tally = rank_in_block;
        for i in 0..64 {
            if bits_num & 0x8000000000000000 == 0 {
                tally -= 1;
                if tally == 0 {
                    return Some(block as u64 * 64 + i);
                }
            }
            bits_num <<= 1;
        }
        None
    }
    /// Like `select0`, but `subrank` is counted relative to `start`, and only
    /// results inside `[start, end)` are returned.
    pub fn select0_from_range(&self, subrank: u64, start: u64, end: u64) -> Option<u64> {
        // todo this is a dumb implementation. we can actually do a much faster select by making sblock/block lookup ranged. for now this will work.
        let rank_offset = if start == 0 { 0 } else { self.rank0(start - 1) };
        let result = self.select0(rank_offset + subrank)?;
        if result < start && start < end && subrank == 0 && self.get(start) {
            Some(start)
        } else if result < start || result >= end {
            None
        } else {
            Some(result)
        }
    }
    /// Iterates all bits of the underlying bitarray in order.
    pub fn iter(&self) -> impl Iterator<Item = bool> {
        self.array.iter()
    }
}
/// Builds the rank/select index for `bitarray`, writing the per-block index
/// to `blocks` and the per-superblock index to `sblocks`.
///
/// Block entries hold the 1-count from that block to the end of its
/// superblock; superblock entries hold the cumulative 1-count up to and
/// including that superblock — the encoding `BitIndex::rank1` expects.
pub async fn build_bitindex<
    R: 'static + AsyncRead + Unpin + Send,
    W1: 'static + SyncableFile + Send,
    W2: 'static + SyncableFile + Send,
>(
    bitarray: R,
    blocks: W1,
    sblocks: W2,
) -> io::Result<()> {
    let block_stream = bitarray_stream_blocks(bitarray);
    // the following widths are unoptimized, but should always be large enough
    let mut blocks_builder =
        LogArrayFileBuilder::new(blocks, 64 - (SBLOCK_SIZE * 64).leading_zeros() as u8);
    let mut sblocks_builder = LogArrayFileBuilder::new(sblocks, 64);
    // we chunk block_stream into blocks of SBLOCK size for further processing
    let mut sblock_rank = 0;
    let mut stream = block_stream.chunks(SBLOCK_SIZE);
    while let Some(chunk) = stream.next().await {
        let mut block_ranks = Vec::with_capacity(chunk.len());
        for num in chunk {
            block_ranks.push(num?.count_ones() as u64);
        }
        // start from the superblock total; each pushed block entry then
        // excludes the blocks that precede it within the superblock
        let mut sblock_subrank = block_ranks.iter().sum();
        sblock_rank += sblock_subrank;
        for block_rank in block_ranks {
            blocks_builder.push(sblock_subrank).await?;
            sblock_subrank -= block_rank;
        }
        sblocks_builder.push(sblock_rank).await?;
    }
    blocks_builder.finalize().await?;
    sblocks_builder.finalize().await?;
    Ok(())
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::storage::memory::*;
    use crate::storage::*;
    use crate::structure::util::stream_iter_ok;
    use futures::executor::block_on;
    /// Shared fixture for every test below: builds a `BitIndex` over the
    /// 123456-bit pattern `n % 3 == 0` (bits 0, 3, 6, … set, rest clear).
    ///
    /// Previously this ~20-line setup was duplicated verbatim in all eight
    /// tests; it is now extracted so each test only states its assertions.
    async fn build_test_index() -> BitIndex {
        let bits = MemoryBackedStore::new();
        let mut ba_builder = BitArrayFileBuilder::new(bits.open_write().await.unwrap());
        let contents = (0..).map(|n| n % 3 == 0).take(123456);
        block_on(async {
            ba_builder.push_all(stream_iter_ok(contents)).await?;
            ba_builder.finalize().await?;
            Ok::<_, io::Error>(())
        })
        .unwrap();
        let index_blocks = MemoryBackedStore::new();
        let index_sblocks = MemoryBackedStore::new();
        block_on(build_bitindex(
            bits.open_read().await.unwrap(),
            index_blocks.open_write().await.unwrap(),
            index_sblocks.open_write().await.unwrap(),
        ))
        .unwrap();
        BitIndex::from_maps(
            block_on(bits.map()).unwrap(),
            block_on(index_blocks.map()).unwrap(),
            block_on(index_sblocks.map()).unwrap(),
        )
    }
    #[tokio::test]
    async fn rank1_works() {
        let index = build_test_index().await;
        for i in 0..123456 {
            assert_eq!(i / 3 + 1, index.rank1(i));
        }
    }
    #[tokio::test]
    async fn select1_works() {
        let index = build_test_index().await;
        for i in 1..(123456 / 3) {
            assert_eq!((i - 1) * 3, index.select1(i).unwrap());
        }
        assert!(index.select1(123456 * 2 / 3).is_none());
    }
    #[tokio::test]
    async fn rank1_ranged() {
        let index = build_test_index().await;
        assert_eq!(0, index.rank1_from_range(6, 6));
        assert_eq!(1, index.rank1_from_range(6, 7));
        assert_eq!(1, index.rank1_from_range(6, 8));
        assert_eq!(2, index.rank1_from_range(6, 12));
        assert_eq!(2, index.rank1_from_range(4, 12));
    }
    #[tokio::test]
    async fn select1_ranged() {
        let index = build_test_index().await;
        assert_eq!(None, index.select1_from_range(0, 6, 6));
        assert_eq!(None, index.select1_from_range(0, 6, 7));
        assert_eq!(Some(6), index.select1_from_range(1, 6, 7));
        assert_eq!(Some(7), index.select1_from_range(0, 7, 8));
        assert_eq!(Some(9), index.select1_from_range(2, 5, 11));
        assert_eq!(None, index.select1_from_range(123456, 5, 10));
    }
    #[tokio::test]
    async fn rank0_works() {
        let index = build_test_index().await;
        for i in 0..123456 {
            assert_eq!(1 + i - (i / 3 + 1), index.rank0(i));
        }
    }
    #[tokio::test]
    async fn select0_works() {
        let index = build_test_index().await;
        for i in 1..=(123456 * 2 / 3) {
            assert_eq!(i + (i - 1) / 2, index.select0(i).unwrap());
        }
        assert_eq!(None, index.select0(123456 * 2 / 3 + 1));
    }
    #[tokio::test]
    async fn rank0_ranged() {
        let index = build_test_index().await;
        assert_eq!(0, index.rank0_from_range(5, 5));
        assert_eq!(1, index.rank0_from_range(5, 6));
        assert_eq!(0, index.rank0_from_range(6, 6));
        assert_eq!(2, index.rank0_from_range(5, 8));
        assert_eq!(4, index.rank0_from_range(6, 12));
        assert_eq!(6, index.rank0_from_range(4, 12));
    }
    #[tokio::test]
    async fn select0_ranged() {
        let index = build_test_index().await;
        assert_eq!(None, index.select0_from_range(0, 6, 6));
        assert_eq!(Some(6), index.select0_from_range(0, 6, 7));
        assert_eq!(None, index.select0_from_range(1, 6, 7));
        assert_eq!(Some(7), index.select0_from_range(1, 6, 8));
        assert_eq!(None, index.select0_from_range(0, 7, 8));
        assert_eq!(Some(10), index.select0_from_range(4, 5, 11));
        assert_eq!(None, index.select0_from_range(123456, 5, 10));
    }
}
| 32.849085 | 148 | 0.559794 |
69524fc115a72c042d69781b8ed9069d47d0c257
| 4,572 |
use regex::Regex;
use serde_json::{Map, Value};
use super::*;
use crate::util;
// MediaWiki API endpoint: full-text search returning at most one page id
// (the query string is appended to `srsearch=`).
const SEARCH_URL: &str =
    "https://en.wikipedia.org/w/api.php?format=json\
    &formatversion=2&action=query&list=search&srlimit=1&srprop=&srsearch=";
// MediaWiki API endpoint: plain-text intro extract plus outgoing links for a
// page id (appended to `pageids=`).
const ENTRY_URL: &str =
    "https://en.wikipedia.org/w/api.php?format=json\
    &action=query&prop=extracts|links&pllimit=100&exintro&explaintext&redirects=1&pageids=";
/// `wikipedia` chat command: looks a query up on English Wikipedia and
/// replies with the article's intro extract.
pub struct Wikipedia {
    // Matches parenthesised asides (with optional leading whitespace) so
    // `clean` can strip them from extracts; compiled once in `new`.
    parens: Regex
}
impl Command for Wikipedia {
    // Command name (presumably `abbrev` also accepts its abbreviations —
    // confirm against the helper's definition).
    fn cmds(&self) -> Vec<String> {
        abbrev("wikipedia")
    }
    fn usage(&self) -> String { "<query>".to_owned() }
    // Requires at least one argument word.
    fn fits(&self, size: usize) -> bool { size >= 1 }
    // No special privileges needed.
    fn auth(&self) -> Auth { Anyone }
    fn run(&mut self, args: &[&str], _: &Context, db: &mut Db) -> Outcome {
        // Join all argument words back into a single search query.
        Ok(vec![Reply(self.search(&args.join(" "), &db.client)?)])
    }
}
// `Default` delegates to `new`, which compiles the parens regex.
impl Default for Wikipedia { fn default() -> Self { Self::new() } }
impl Wikipedia {
    /// Creates the command, compiling the parenthesised-aside regex once.
    #[inline]
    pub fn new() -> Self {
        Self {
            parens: Regex::new("\\s*\\([^()]+\\)").expect("Parens regex failed to compile")
        }
    }
    // Strips "(listen)" markers and parenthesised asides from an extract.
    // NOTE(review): the final `replace` needle renders identically to a plain
    // space here; presumably it normalizes a non-breaking space (U+00A0) to a
    // regular space — confirm the literal's actual bytes.
    #[inline]
    fn clean(&self, s: &str) -> String {
        self.parens.replace_all(&s.replace("(listen)", ""), "").replace(" ", " ")
    }
    // Searches Wikipedia for `query` and returns a formatted summary line.
    // Two API round trips: a full-text search resolving the query to a page
    // id, then an extract/links fetch for that page id.
    fn search(&self, query: &str, cli: &reqwest::Client) -> Result<String, Error> {
        let searches = serde_json::from_reader(
            cli.get(&format!("{}{}", SEARCH_URL, encode(query))).send()?
        )?;
        let page = parse_page(&searches)
            .ok_or_else(|| ParseErr(err_msg("Unable to parse results")))?;
        let entry = serde_json::from_reader(
            cli.get(&format!("{}{}", ENTRY_URL, encode(&page.to_string()))).send()?
        )?;
        self.get_entry(page, &entry)
            .ok_or_else(||ParseErr(err_msg("Unable to parse entry")))?
    }
    // Digs the title and intro extract for `page` out of the entry JSON.
    // Returns None when the JSON shape is unexpected; returns
    // Some(Err(Ambiguous(..))) when the page looks like a disambiguation
    // page ("... may refer to:").
    fn get_entry(&self, page: u64, json: &Value) -> Option<Result<String, Error>> {
        let result = json
            .as_object()?
            .get("query")?
            .as_object()?
            .get("pages")?
            .as_object()?
            .get(&page.to_string())?
            .as_object()?;
        let title = result.get("title")?.as_str()?;
        let extract = result.get("extract")?.as_str()?;
        // first line of the extract; a disambiguation intro ends in ':'
        // and mentions "refer"
        let top = extract.split('\n').next()?;
        if top.ends_with(':') && top.contains("refer") {
            if let Some(disambig) = parse_disambig(title, result) {
                return Some(Err(Ambiguous(0, disambig)))
            }
        }
        // "<url> <bold title>: <cleaned one-line extract>" (\x02 is IRC bold)
        Some( Ok(
            util::trim(&format!(
                "{} \x02{}\x02: {}",
                format!("https://en.wikipedia.org/wiki/{}", encode(title)),
                title,
                self.clean(&extract.replace("\n", " "))
            ))
        ) )
    }
}
#[inline]
/// Percent-encodes a wiki title, converting spaces to underscores first
/// (MediaWiki's canonical title form).
fn encode(s: &str) -> String {
    let underscored = s.replace(" ", "_");
    util::encode(&underscored)
}
/// Extracts the first search result's page id from a search API response,
/// walking `query` -> `search` -> `[0]` -> `pageid`.
fn parse_page(json: &Value) -> Option<u64> {
    let query = json.as_object()?.get("query")?.as_object()?;
    let results = query.get("search")?.as_array()?;
    let first = results.first()?.as_object()?;
    first.get("pageid")?.as_u64()
}
/// Extracts a link title from a link JSON object, skipping links whose
/// title mentions "disambiguation".
fn parse_link(json: &Value) -> Option<String> {
    let title = json.as_object()?.get("title")?.as_str()?;
    match title.contains("disambiguation") {
        true => None,
        false => Some(title.to_owned()),
    }
}
/// Collects candidate link titles from a disambiguation page's JSON object.
///
/// Links whose lowercase title starts with `"<title> ("` are preferred
/// (they disambiguate the exact title); if none match, all
/// non-disambiguation links are returned instead.
fn parse_disambig(title_up: &str, json: &Map<String, Value>) -> Option<Vec<String>> {
    let title = format!("{} (", title_up.to_lowercase());
    let links = json
        .get("links")?
        .as_array()?
        // `.iter()` — the original `.into_iter()` on a `&Vec` was the same
        // by-reference iterator under a misleading name
        .iter()
        .filter_map(parse_link);
    let mut verbatim = links.clone()
        .filter(|x| x.to_lowercase().starts_with(&title))
        .peekable();
    if verbatim.peek().is_some() {
        Some(verbatim.collect())
    } else {
        Some(links.collect())
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // NOTE(review): these tests hit the live Wikipedia API over the network
    // (via `test_def`, presumably a test helper elsewhere in the crate), so
    // they are slow and break whenever the article's intro text changes.
    #[test]
    fn searches() {
        assert_eq!(Wikipedia::new().test_def("Monty Oum").unwrap(), "https://en.wikipedia.org/wiki/Monty_Oum \x02Monty Oum\x02: Monyreak \"Monty\" Oum was an American web-based animator and writer. A self-taught animator, he scripted and produced several crossover fighting video series, drawing the attention of internet production company Rooster Teeth, who hired him. […]");
    }
    #[test]
    fn disambiguates() {
        // "Rock" resolves to a disambiguation page, which `search` reports
        // as an error (`Ambiguous`).
        assert!(Wikipedia::new().test_def("Rock").is_err());
    }
}
| 30.48 | 378 | 0.524278 |
2954f6123b29221f49a0af9c8bfc4d6999720300
| 5,364 |
use super::*;
// Two lanes of `i64` in 16 bytes. With SSE2 the lanes live in a single
// 128-bit register type; otherwise a plain array is used. Both variants are
// `#[repr(C, align(16))]` so the layout matches either way.
pick! {
  if #[cfg(target_feature="sse2")] {
    #[derive(Default, Clone, Copy, PartialEq, Eq)]
    #[repr(C, align(16))]
    pub struct i64x2 { sse: m128i }
  } else {
    #[derive(Default, Clone, Copy, PartialEq, Eq)]
    #[repr(C, align(16))]
    pub struct i64x2 { arr: [i64;2] }
  }
}
// SAFETY: both representations are a plain 128-bit integer payload
// (`m128i` or `[i64; 2]`) under `#[repr(C, align(16))]` with no padding,
// so every bit pattern (including all-zero) is a valid value.
unsafe impl Zeroable for i64x2 {}
unsafe impl Pod for i64x2 {}
impl Add for i64x2 {
  type Output = Self;
  /// Lanewise wrapping addition.
  #[inline]
  #[must_use]
  fn add(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="sse2")] {
        // SSE2 integer addition wraps on overflow, matching the scalar
        // `wrapping_add` fallback below.
        Self { sse: add_i64_m128i(self.sse, rhs.sse) }
      } else {
        Self { arr: [
          self.arr[0].wrapping_add(rhs.arr[0]),
          self.arr[1].wrapping_add(rhs.arr[1]),
        ]}
      }
    }
  }
}
impl Sub for i64x2 {
  type Output = Self;
  /// Lanewise wrapping subtraction.
  #[inline]
  #[must_use]
  fn sub(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="sse2")] {
        // SSE2 integer subtraction wraps on overflow, matching the scalar
        // `wrapping_sub` fallback below.
        Self { sse: sub_i64_m128i(self.sse, rhs.sse) }
      } else {
        Self { arr: [
          self.arr[0].wrapping_sub(rhs.arr[0]),
          self.arr[1].wrapping_sub(rhs.arr[1]),
        ]}
      }
    }
  }
}
impl BitAnd for i64x2 {
  type Output = Self;
  /// Lanewise bitwise AND.
  #[inline]
  #[must_use]
  fn bitand(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: bitand_m128i(self.sse, rhs.sse) }
      } else {
        Self { arr: [
          self.arr[0].bitand(rhs.arr[0]),
          self.arr[1].bitand(rhs.arr[1]),
        ]}
      }
    }
  }
}
impl BitOr for i64x2 {
  type Output = Self;
  /// Lanewise bitwise OR.
  #[inline]
  #[must_use]
  fn bitor(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: bitor_m128i(self.sse, rhs.sse) }
      } else {
        Self { arr: [
          self.arr[0].bitor(rhs.arr[0]),
          self.arr[1].bitor(rhs.arr[1]),
        ]}
      }
    }
  }
}
impl BitXor for i64x2 {
  type Output = Self;
  /// Lanewise bitwise XOR.
  #[inline]
  #[must_use]
  fn bitxor(self, rhs: Self) -> Self::Output {
    pick! {
      if #[cfg(target_feature="sse2")] {
        Self { sse: bitxor_m128i(self.sse, rhs.sse) }
      } else {
        Self { arr: [
          self.arr[0].bitxor(rhs.arr[0]),
          self.arr[1].bitxor(rhs.arr[1]),
        ]}
      }
    }
  }
}
// Generates `Shl<$shift_type>` impls that shift every lane left by the same
// scalar amount.
// NOTE(review): the two paths presumably diverge for shift amounts >= 64
// (the SSE path yields 0 while the scalar `<<` overflows); callers are
// expected to pass shifts in 0..64 — confirm upstream.
macro_rules! impl_shl_t_for_i64x2 {
  ($($shift_type:ty),+ $(,)?) => {
    $(impl Shl<$shift_type> for i64x2 {
      type Output = Self;
      /// Shifts all lanes by the value given.
      #[inline]
      #[must_use]
      fn shl(self, rhs: $shift_type) -> Self::Output {
        let u = rhs as u64;
        pick! {
          if #[cfg(target_feature="sse2")] {
            // the shift amount travels in the low 64 bits of a vector
            let shift = cast([u, 0]);
            Self { sse: shl_all_u64_m128i(self.sse, shift) }
          } else {
            Self { arr: [
              self.arr[0] << u,
              self.arr[1] << u,
            ]}
          }
        }
      }
    })+
  };
}
impl_shl_t_for_i64x2!(i8, u8, i16, u16, i32, u32, i64, u64, i128, u128);
// Generates `Shr<$shift_type>` impls that shift every lane right by the same
// scalar amount (logical shift on the SSE path; the scalar path uses `>>` on
// `i64`, i.e. an arithmetic shift — NOTE(review): confirm the two agree for
// negative lanes upstream).
macro_rules! impl_shr_t_for_i64x2 {
  ($($shift_type:ty),+ $(,)?) => {
    $(impl Shr<$shift_type> for i64x2 {
      type Output = Self;
      /// Shifts all lanes by the value given.
      #[inline]
      #[must_use]
      fn shr(self, rhs: $shift_type) -> Self::Output {
        let u = rhs as u64;
        pick! {
          if #[cfg(target_feature="sse2")] {
            // the shift amount travels in the low 64 bits of a vector
            let shift = cast([u, 0]);
            Self { sse: shr_all_u64_m128i(self.sse, shift) }
          } else {
            Self { arr: [
              self.arr[0] >> u,
              self.arr[1] >> u,
            ]}
          }
        }
      }
    })+
  };
}
impl_shr_t_for_i64x2!(i8, u8, i16, u16, i32, u32, i64, u64, i128, u128);
impl i64x2 {
  /// Lanewise `==`: a lane is all ones (`-1`) where equal, `0` otherwise.
  #[inline]
  #[must_use]
  pub fn cmp_eq(self, rhs: Self) -> Self {
    pick! {
      if #[cfg(target_feature="sse4.1")] {
        Self { sse: cmp_eq_mask_i64_m128i(self.sse, rhs.sse) }
      } else {
        let s: [i64;2] = cast(self);
        let r: [i64;2] = cast(rhs);
        cast([
          if s[0] == r[0] { -1_i64 } else { 0 },
          if s[1] == r[1] { -1_i64 } else { 0 },
        ])
      }
    }
  }
  /// Lanewise `>`: a lane is all ones (`-1`) where `self > rhs`, `0` otherwise.
  #[inline]
  #[must_use]
  pub fn cmp_gt(self, rhs: Self) -> Self {
    pick! {
      if #[cfg(target_feature="sse4.2")] {
        Self { sse: cmp_gt_mask_i64_m128i(self.sse, rhs.sse) }
      } else {
        let s: [i64;2] = cast(self);
        let r: [i64;2] = cast(rhs);
        cast([
          if s[0] > r[0] { -1_i64 } else { 0 },
          if s[1] > r[1] { -1_i64 } else { 0 },
        ])
      }
    }
  }
  /// Lanewise `<`: a lane is all ones (`-1`) where `self < rhs`, `0` otherwise.
  #[inline]
  #[must_use]
  pub fn cmp_lt(self, rhs: Self) -> Self {
    pick! {
      if #[cfg(target_feature="sse4.2")] {
        // BUGFIX: the previous `!cmp_gt_mask(self, rhs)` computed `<=`, not
        // `<`, disagreeing with the scalar fallback on equal lanes.
        // `self < rhs` is exactly `rhs > self`, so swap the operands.
        Self { sse: cmp_gt_mask_i64_m128i(rhs.sse, self.sse) }
      } else {
        let s: [i64;2] = cast(self);
        let r: [i64;2] = cast(rhs);
        cast([
          if s[0] < r[0] { -1_i64 } else { 0 },
          if s[1] < r[1] { -1_i64 } else { 0 },
        ])
      }
    }
  }
  /// Blend: takes bits from `t` where `self` is set and from `f` where it is
  /// clear. `self` is intended to be a lane mask (all-ones / all-zero lanes)
  /// as produced by the `cmp_*` methods above.
  /// NOTE(review): the SSE4.1 path selects per byte by each byte's top bit,
  /// so arbitrary (non-mask) selectors presumably differ between the two
  /// paths — confirm callers only pass lane masks.
  #[inline]
  #[must_use]
  pub fn blend(self, t: Self, f: Self) -> Self {
    pick! {
      if #[cfg(target_feature="sse4.1")] {
        Self { sse: blend_varying_i8_m128i(f.sse, t.sse, self.sse) }
      } else {
        generic_bit_blend(self, t, f)
      }
    }
  }
  /// Converts each lane to `f64` (like an `as f64` cast per lane).
  #[inline]
  #[must_use]
  pub fn round_float(self) -> f64x2 {
    let arr: [i64; 2] = cast(self);
    cast([arr[0] as f64, arr[1] as f64])
  }
}
| 23.220779 | 72 | 0.481357 |
dd8c7e9660af78945e5954a12621e2e20254ace9
| 69,644 |
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//
// ignore-lexer-test FIXME #15679
//! String manipulation
//!
//! For more details, see std::str
#![doc(primitive = "str")]
use mem;
use char;
use char::Char;
use clone::Clone;
use cmp;
use cmp::{PartialEq, Eq};
use default::Default;
use iter::{Map, Iterator};
use iter::{DoubleEndedIterator, ExactSize};
use iter::range;
use kinds::Sized;
use num::{CheckedMul, Saturating};
use option::{Option, None, Some};
use raw::Repr;
use slice::ImmutableSlice;
use slice;
use uint;
/*
Section: Creating a string
*/
/// Converts a vector to a string slice without performing any allocations.
///
/// Once the slice has been validated as utf-8, it is transmuted in-place and
/// returned as a '&str' instead of a '&[u8]'
///
/// Returns None if the slice is not utf-8.
pub fn from_utf8<'a>(v: &'a [u8]) -> Option<&'a str> {
    if is_utf8(v) {
        // validation above guarantees well-formed UTF-8, so the unchecked
        // conversion in `raw::from_utf8` is sound here
        Some(unsafe { raw::from_utf8(v) })
    } else { None }
}
/// Something that can be used to compare against a character
///
/// Implemented below for `char`, boxed closures, fn pointers, and `&[char]`.
pub trait CharEq {
    /// Determine if the splitter should split at the given character
    fn matches(&mut self, char) -> bool;
    /// Indicate if this is only concerned about ASCII characters,
    /// which can allow for a faster implementation.
    fn only_ascii(&self) -> bool;
}
impl CharEq for char {
    // exact character equality
    #[inline]
    fn matches(&mut self, c: char) -> bool { *self == c }
    // a single char is ASCII-only iff its code point is below 128
    #[inline]
    fn only_ascii(&self) -> bool { (*self as uint) < 128 }
}
impl<'a> CharEq for |char|: 'a -> bool {
    #[inline]
    fn matches(&mut self, c: char) -> bool { (*self)(c) }
    // an arbitrary closure may match anything, so no ASCII fast path
    #[inline]
    fn only_ascii(&self) -> bool { false }
}
impl CharEq for extern "Rust" fn(char) -> bool {
    #[inline]
    fn matches(&mut self, c: char) -> bool { (*self)(c) }
    // an arbitrary predicate may match anything, so no ASCII fast path
    #[inline]
    fn only_ascii(&self) -> bool { false }
}
impl<'a> CharEq for &'a [char] {
#[inline]
fn matches(&mut self, c: char) -> bool {
self.iter().any(|&mut m| m.matches(c))
}
#[inline]
fn only_ascii(&self) -> bool {
self.iter().all(|m| m.only_ascii())
}
}
/*
Section: Iterators
*/
/// Iterator for the char (representing *Unicode Scalar Values*) of a string
///
/// Created with the method `.chars()`.
#[deriving(Clone)]
pub struct Chars<'a> {
    // Raw byte iterator over the string's UTF-8 representation; the str
    // invariant guarantees the bytes are well-formed UTF-8.
    iter: slice::Items<'a, u8>
}
// Return the initial codepoint accumulator for the first byte.
// The first byte is special, only want bottom 5 bits for width 2, 4 bits
// for width 3, and 3 bits for width 4
macro_rules! utf8_first_byte(
    ($byte:expr, $width:expr) => (($byte & (0x7F >> $width)) as u32)
)
// return the value of $ch updated with continuation byte $byte
macro_rules! utf8_acc_cont_byte(
    ($ch:expr, $byte:expr) => (($ch << 6) | ($byte & CONT_MASK) as u32)
)
// true iff $byte is a UTF-8 continuation byte (bit pattern 10xx_xxxx)
macro_rules! utf8_is_cont_byte(
    ($byte:expr) => (($byte & !CONT_MASK) == TAG_CONT_U8)
)
#[inline]
fn unwrap_or_0(opt: Option<&u8>) -> u8 {
    // A missing byte decodes as 0, which can never look like a UTF-8
    // continuation byte, so a truncated sequence falls through harmlessly.
    match opt {
        None => 0,
        Some(byte) => *byte,
    }
}
// Forward decoding of UTF-8 into chars. ASCII bytes are returned directly;
// multibyte sequences are accumulated 6 bits at a time from their
// continuation bytes.
impl<'a> Iterator<char> for Chars<'a> {
    #[inline]
    fn next(&mut self) -> Option<char> {
        // Decode UTF-8, using the valid UTF-8 invariant
        let x = match self.iter.next() {
            None => return None,
            Some(&next_byte) if next_byte < 128 => return Some(next_byte as char),
            Some(&next_byte) => next_byte,
        };
        // Multibyte case follows
        // Decode from a byte combination out of: [[[x y] z] w]
        // NOTE: Performance is sensitive to the exact formulation here
        let init = utf8_first_byte!(x, 2);
        let y = unwrap_or_0(self.iter.next());
        let mut ch = utf8_acc_cont_byte!(init, y);
        if x >= 0xE0 {
            // [[x y z] w] case
            // 5th bit in 0xE0 .. 0xEF is always clear, so `init` is still valid
            let z = unwrap_or_0(self.iter.next());
            let y_z = utf8_acc_cont_byte!((y & CONT_MASK) as u32, z);
            ch = init << 12 | y_z;
            if x >= 0xF0 {
                // [x y z w] case
                // use only the lower 3 bits of `init`
                let w = unwrap_or_0(self.iter.next());
                ch = (init & 7) << 18 | utf8_acc_cont_byte!(y_z, w);
            }
        }
        // str invariant says `ch` is a valid Unicode Scalar Value
        unsafe {
            Some(mem::transmute(ch))
        }
    }
    #[inline]
    fn size_hint(&self) -> (uint, Option<uint>) {
        // Each char occupies 1 to 4 bytes, so with `len` bytes remaining the
        // char count is at least ceil(len / 4) and at most len.
        let (len, _) = self.iter.size_hint();
        (len.saturating_add(3) / 4, Some(len))
    }
}
// Backward decoding of UTF-8. Walking from the end, continuation bytes are
// collected until the leading byte is found; each time a longer width is
// discovered the accumulator is re-seeded from the new leading byte.
impl<'a> DoubleEndedIterator<char> for Chars<'a> {
    #[inline]
    fn next_back(&mut self) -> Option<char> {
        let w = match self.iter.next_back() {
            None => return None,
            Some(&back_byte) if back_byte < 128 => return Some(back_byte as char),
            Some(&back_byte) => back_byte,
        };
        // Multibyte case follows
        // Decode from a byte combination out of: [x [y [z w]]]
        let mut ch;
        let z = unwrap_or_0(self.iter.next_back());
        ch = utf8_first_byte!(z, 2);
        if utf8_is_cont_byte!(z) {
            let y = unwrap_or_0(self.iter.next_back());
            ch = utf8_first_byte!(y, 3);
            if utf8_is_cont_byte!(y) {
                let x = unwrap_or_0(self.iter.next_back());
                ch = utf8_first_byte!(x, 4);
                ch = utf8_acc_cont_byte!(ch, y);
            }
            ch = utf8_acc_cont_byte!(ch, z);
        }
        ch = utf8_acc_cont_byte!(ch, w);
        // str invariant says `ch` is a valid Unicode Scalar Value
        unsafe {
            Some(mem::transmute(ch))
        }
    }
}
/// External iterator for a string's characters and their byte offsets.
/// Use with the `std::iter` module.
#[deriving(Clone)]
pub struct CharOffsets<'a> {
    // Byte offset of the next char to be yielded from the front.
    front_offset: uint,
    iter: Chars<'a>,
}
impl<'a> Iterator<(uint, char)> for CharOffsets<'a> {
    #[inline]
    fn next(&mut self) -> Option<(uint, char)> {
        // The upper bound of Chars' size_hint is the exact number of bytes
        // remaining, so the hint delta before/after `next` gives the byte
        // width of the char just consumed.
        let (pre_len, _) = self.iter.iter.size_hint();
        match self.iter.next() {
            None => None,
            Some(ch) => {
                let index = self.front_offset;
                let (len, _) = self.iter.iter.size_hint();
                self.front_offset += pre_len - len;
                Some((index, ch))
            }
        }
    }
    #[inline]
    fn size_hint(&self) -> (uint, Option<uint>) {
        self.iter.size_hint()
    }
}
impl<'a> DoubleEndedIterator<(uint, char)> for CharOffsets<'a> {
    #[inline]
    fn next_back(&mut self) -> Option<(uint, char)> {
        match self.iter.next_back() {
            None => None,
            Some(ch) => {
                // The char just removed from the back starts right after the
                // bytes still remaining, counted from front_offset.
                let (len, _) = self.iter.iter.size_hint();
                let index = self.front_offset + len;
                Some((index, ch))
            }
        }
    }
}
/// External iterator for a string's bytes.
/// Use with the `std::iter` module.
pub type Bytes<'a> =
    Map<'a, &'a u8, u8, slice::Items<'a, u8>>;
/// An iterator over the substrings of a string, separated by `sep`.
#[deriving(Clone)]
pub struct CharSplits<'a, Sep> {
    /// The slice remaining to be iterated
    string: &'a str,
    /// The separator predicate (see `CharEq`)
    sep: Sep,
    /// Whether an empty string at the end is allowed
    allow_trailing_empty: bool,
    /// Whether `sep` matches only ASCII, enabling the byte-wise fast path
    only_ascii: bool,
    /// Set once iteration has yielded its final fragment
    finished: bool,
}
/// An iterator over the substrings of a string, separated by `sep`,
/// splitting at most `count` times.
#[deriving(Clone)]
pub struct CharSplitsN<'a, Sep> {
    iter: CharSplits<'a, Sep>,
    /// The number of splits remaining
    count: uint,
    /// Whether to take splits from the back of the string (rsplitn)
    invert: bool,
}
/// An iterator over the lines of a string, separated by either `\n` or (`\r\n`).
pub type AnyLines<'a> =
    Map<'a, &'a str, &'a str, CharSplits<'a, char>>;
impl<'a, Sep> CharSplits<'a, Sep> {
    // Yield whatever remains as the final fragment, or None if iteration is
    // done or the remainder is an empty string that terminator semantics
    // (`allow_trailing_empty == false`) suppress.
    #[inline]
    fn get_end(&mut self) -> Option<&'a str> {
        if !self.finished && (self.allow_trailing_empty || self.string.len() > 0) {
            self.finished = true;
            Some(self.string)
        } else {
            None
        }
    }
}
impl<'a, Sep: CharEq> Iterator<&'a str> for CharSplits<'a, Sep> {
    #[inline]
    fn next(&mut self) -> Option<&'a str> {
        if self.finished { return None }
        // Find (start, end) byte indices of the next separator occurrence.
        let mut next_split = None;
        if self.only_ascii {
            // Fast path: separator is ASCII-only, so a byte-wise scan is
            // enough; the `byte < 128` guard skips UTF-8 continuation bytes.
            for (idx, byte) in self.string.bytes().enumerate() {
                if self.sep.matches(byte as char) && byte < 128u8 {
                    next_split = Some((idx, idx + 1));
                    break;
                }
            }
        } else {
            // General path: walk chars, using char_range_at to find the byte
            // index just past the matched (possibly multibyte) separator.
            for (idx, ch) in self.string.char_indices() {
                if self.sep.matches(ch) {
                    next_split = Some((idx, self.string.char_range_at(idx).next));
                    break;
                }
            }
        }
        match next_split {
            Some((a, b)) => unsafe {
                // Indices come from byte/char scans of `string`, so they lie
                // on valid boundaries within it.
                let elt = raw::slice_unchecked(self.string, 0, a);
                self.string = raw::slice_unchecked(self.string, b, self.string.len());
                Some(elt)
            },
            None => self.get_end(),
        }
    }
}
impl<'a, Sep: CharEq> DoubleEndedIterator<&'a str>
for CharSplits<'a, Sep> {
    #[inline]
    fn next_back(&mut self) -> Option<&'a str> {
        if self.finished { return None }
        // Terminator semantics: before the first back step, consume (and
        // drop) a trailing empty fragment, then continue as a normal rsplit.
        if !self.allow_trailing_empty {
            self.allow_trailing_empty = true;
            match self.next_back() {
                Some(elt) if !elt.is_empty() => return Some(elt),
                _ => if self.finished { return None }
            }
        }
        let len = self.string.len();
        // Mirror of `next`: locate the last separator occurrence.
        let mut next_split = None;
        if self.only_ascii {
            for (idx, byte) in self.string.bytes().enumerate().rev() {
                if self.sep.matches(byte as char) && byte < 128u8 {
                    next_split = Some((idx, idx + 1));
                    break;
                }
            }
        } else {
            for (idx, ch) in self.string.char_indices().rev() {
                if self.sep.matches(ch) {
                    next_split = Some((idx, self.string.char_range_at(idx).next));
                    break;
                }
            }
        }
        match next_split {
            Some((a, b)) => unsafe {
                // Yield the fragment after the separator; keep the prefix.
                let elt = raw::slice_unchecked(self.string, b, len);
                self.string = raw::slice_unchecked(self.string, 0, a);
                Some(elt)
            },
            None => { self.finished = true; Some(self.string) }
        }
    }
}
impl<'a, Sep: CharEq> Iterator<&'a str> for CharSplitsN<'a, Sep> {
    #[inline]
    fn next(&mut self) -> Option<&'a str> {
        // While splits remain, delegate to the underlying splitter (from the
        // back when `invert` is set, i.e. rsplitn); once the budget is spent,
        // everything left is returned as a single final fragment.
        if self.count != 0 {
            self.count -= 1;
            if self.invert { self.iter.next_back() } else { self.iter.next() }
        } else {
            self.iter.get_end()
        }
    }
}
/// The internal state of an iterator that searches for matches of a substring
/// within a larger string using naive search
#[deriving(Clone)]
struct NaiveSearcher {
    // Byte offset in the haystack where the next comparison starts.
    position: uint
}
impl NaiveSearcher {
    fn new() -> NaiveSearcher {
        NaiveSearcher { position: 0 }
    }
    // O(n*m) scan: compare the needle against every position in turn.
    // Matches are non-overlapping because a hit advances past the whole
    // needle (advancing by 1 instead would yield overlapping matches).
    fn next(&mut self, haystack: &[u8], needle: &[u8]) -> Option<(uint, uint)> {
        while self.position + needle.len() <= haystack.len() {
            if haystack[self.position .. self.position + needle.len()] == needle {
                let match_pos = self.position;
                self.position += needle.len(); // add 1 for all matches
                return Some((match_pos, match_pos + needle.len()));
            } else {
                self.position += 1;
            }
        }
        None
    }
}
/// The internal state of an iterator that searches for matches of a substring
/// within a larger string using two-way search
#[deriving(Clone)]
struct TwoWaySearcher {
    // constants
    /// Index where the critical factorization (u, v) splits the needle
    crit_pos: uint,
    /// The needle's period (or an approximation when the period is long)
    period: uint,
    /// 64-bit Bloom-style filter of needle bytes, keyed on the low 6 bits
    byteset: u64,
    // variables
    /// Current search position in the haystack
    position: uint,
    /// Number of needle bytes already known to match (`uint::MAX` marks the
    /// long-period variant, which does not use this memory)
    memory: uint
}
/*
This is the Two-Way search algorithm, which was introduced in the paper:
Crochemore, M., Perrin, D., 1991, Two-way string-matching, Journal of the ACM 38(3):651-675.
Here's some background information.
A *word* is a string of symbols. The *length* of a word should be a familiar
notion, and here we denote it for any word x by |x|.
(We also allow for the possibility of the *empty word*, a word of length zero).
If x is any non-empty word, then an integer p with 0 < p <= |x| is said to be a
*period* for x iff for all i with 0 <= i <= |x| - p - 1, we have x[i] == x[i+p].
For example, both 1 and 2 are periods for the string "aa". As another example,
the only period of the string "abcd" is 4.
We denote by period(x) the *smallest* period of x (provided that x is non-empty).
This is always well-defined since every non-empty word x has at least one period,
|x|. We sometimes call this *the period* of x.
If u, v and x are words such that x = uv, where uv is the concatenation of u and
v, then we say that (u, v) is a *factorization* of x.
Let (u, v) be a factorization for a word x. Then if w is a non-empty word such
that both of the following hold
- either w is a suffix of u or u is a suffix of w
- either w is a prefix of v or v is a prefix of w
then w is said to be a *repetition* for the factorization (u, v).
Just to unpack this, there are four possibilities here. Let w = "abc". Then we
might have:
- w is a suffix of u and w is a prefix of v. ex: ("lolabc", "abcde")
- w is a suffix of u and v is a prefix of w. ex: ("lolabc", "ab")
- u is a suffix of w and w is a prefix of v. ex: ("bc", "abchi")
- u is a suffix of w and v is a prefix of w. ex: ("bc", "a")
Note that the word vu is a repetition for any factorization (u,v) of x = uv,
so every factorization has at least one repetition.
If x is a string and (u, v) is a factorization for x, then a *local period* for
(u, v) is an integer r such that there is some word w such that |w| = r and w is
a repetition for (u, v).
We denote by local_period(u, v) the smallest local period of (u, v). We sometimes
call this *the local period* of (u, v). Provided that x = uv is non-empty, this
is well-defined (because each non-empty word has at least one factorization, as
noted above).
It can be proven that the following is an equivalent definition of a local period
for a factorization (u, v): any positive integer r such that x[i] == x[i+r] for
all i such that |u| - r <= i <= |u| - 1 and such that both x[i] and x[i+r] are
defined. (i.e. i > 0 and i + r < |x|).
Using the above reformulation, it is easy to prove that
1 <= local_period(u, v) <= period(uv)
A factorization (u, v) of x such that local_period(u,v) = period(x) is called a
*critical factorization*.
The algorithm hinges on the following theorem, which is stated without proof:
**Critical Factorization Theorem** Any word x has at least one critical
factorization (u, v) such that |u| < period(x).
The purpose of maximal_suffix is to find such a critical factorization.
*/
impl TwoWaySearcher {
    fn new(needle: &[u8]) -> TwoWaySearcher {
        // Per the Critical Factorization Theorem (see the comment block
        // above), take the later of the two maximal suffixes computed under
        // the normal and reversed byte orderings.
        let (crit_pos1, period1) = TwoWaySearcher::maximal_suffix(needle, false);
        let (crit_pos2, period2) = TwoWaySearcher::maximal_suffix(needle, true);
        let crit_pos;
        let period;
        if crit_pos1 > crit_pos2 {
            crit_pos = crit_pos1;
            period = period1;
        } else {
            crit_pos = crit_pos2;
            period = period2;
        }
        // This isn't in the original algorithm, as far as I'm aware.
        let byteset = needle.iter()
                            .fold(0, |a, &b| (1 << ((b & 0x3f) as uint)) | a);
        // A particularly readable explanation of what's going on here can be found
        // in Crochemore and Rytter's book "Text Algorithms", ch 13. Specifically
        // see the code for "Algorithm CP" on p. 323.
        //
        // What's going on is we have some critical factorization (u, v) of the
        // needle, and we want to determine whether u is a suffix of
        // v[..period]. If it is, we use "Algorithm CP1". Otherwise we use
        // "Algorithm CP2", which is optimized for when the period of the needle
        // is large.
        if needle[..crit_pos] == needle[period.. period + crit_pos] {
            TwoWaySearcher {
                crit_pos: crit_pos,
                period: period,
                byteset: byteset,
                position: 0,
                memory: 0
            }
        } else {
            TwoWaySearcher {
                crit_pos: crit_pos,
                period: cmp::max(crit_pos, needle.len() - crit_pos) + 1,
                byteset: byteset,
                position: 0,
                memory: uint::MAX // Dummy value to signify that the period is long
            }
        }
    }
    // One of the main ideas of Two-Way is that we factorize the needle into
    // two halves, (u, v), and begin trying to find v in the haystack by scanning
    // left to right. If v matches, we try to match u by scanning right to left.
    // How far we can jump when we encounter a mismatch is all based on the fact
    // that (u, v) is a critical factorization for the needle.
    #[inline]
    fn next(&mut self, haystack: &[u8], needle: &[u8], long_period: bool) -> Option<(uint, uint)> {
        'search: loop {
            // Check that we have room to search in
            if self.position + needle.len() > haystack.len() {
                return None;
            }
            // Quickly skip by large portions unrelated to our substring
            // (if the byteset filter says the last candidate byte cannot
            // appear anywhere in the needle, jump a whole needle length).
            if (self.byteset >>
                    ((haystack[self.position + needle.len() - 1] & 0x3f)
                     as uint)) & 1 == 0 {
                self.position += needle.len();
                if !long_period {
                    self.memory = 0;
                }
                continue 'search;
            }
            // See if the right part of the needle matches
            let start = if long_period { self.crit_pos }
                        else { cmp::max(self.crit_pos, self.memory) };
            for i in range(start, needle.len()) {
                if needle[i] != haystack[self.position + i] {
                    self.position += i - self.crit_pos + 1;
                    if !long_period {
                        self.memory = 0;
                    }
                    continue 'search;
                }
            }
            // See if the left part of the needle matches
            let start = if long_period { 0 } else { self.memory };
            for i in range(start, self.crit_pos).rev() {
                if needle[i] != haystack[self.position + i] {
                    // On a left-half mismatch we can shift by the full period.
                    self.position += self.period;
                    if !long_period {
                        self.memory = needle.len() - self.period;
                    }
                    continue 'search;
                }
            }
            // We have found a match!
            let match_pos = self.position;
            self.position += needle.len(); // add self.period for all matches
            if !long_period {
                self.memory = 0; // set to needle.len() - self.period for all matches
            }
            return Some((match_pos, match_pos + needle.len()));
        }
    }
    // Computes a critical factorization (u, v) of `arr`.
    // Specifically, returns (i, p), where i is the starting index of v in some
    // critical factorization (u, v) and p = period(v)
    #[inline]
    fn maximal_suffix(arr: &[u8], reversed: bool) -> (uint, uint) {
        // NOTE(review): `left` is a `uint`, so `-1` wraps to `uint::MAX` and
        // `left + offset` wraps back to 0 when offset == 1 — the code relies
        // on this wrapping arithmetic; confirm against the era's semantics.
        let mut left = -1; // Corresponds to i in the paper
        let mut right = 0; // Corresponds to j in the paper
        let mut offset = 1; // Corresponds to k in the paper
        let mut period = 1; // Corresponds to p in the paper
        while right + offset < arr.len() {
            let a;
            let b;
            // `reversed` flips the byte comparison order so the two calls in
            // `new` explore both candidate orderings.
            if reversed {
                a = arr[left + offset];
                b = arr[right + offset];
            } else {
                a = arr[right + offset];
                b = arr[left + offset];
            }
            if a < b {
                // Suffix is smaller, period is entire prefix so far.
                right += offset;
                offset = 1;
                period = right - left;
            } else if a == b {
                // Advance through repetition of the current period.
                if offset == period {
                    right += offset;
                    offset = 1;
                } else {
                    offset += 1;
                }
            } else {
                // Suffix is larger, start over from current location.
                left = right;
                right += 1;
                offset = 1;
                period = 1;
            }
        }
        (left + 1, period)
    }
}
/// The internal state of an iterator that searches for matches of a substring
/// within a larger string using a dynamically chosen search algorithm
#[deriving(Clone)]
enum Searcher {
    /// Naive O(n*m) scan, used for short haystacks
    Naive(NaiveSearcher),
    /// Two-Way search with the short-period ("CP1") shift rule
    TwoWay(TwoWaySearcher),
    /// Two-Way search with the long-period ("CP2") shift rule
    TwoWayLong(TwoWaySearcher)
}
impl Searcher {
    // Pick an algorithm: naive search when the haystack is barely longer
    // than the needle (setup cost dominates), Two-Way otherwise, with the
    // long-period variant selected by the sentinel left in `memory`.
    fn new(haystack: &[u8], needle: &[u8]) -> Searcher {
        // FIXME: Tune this.
        // FIXME(#16715): This unsigned integer addition will probably not
        // overflow because that would mean that the memory almost solely
        // consists of the needle. Needs #16715 to be formally fixed.
        if needle.len() + 20 > haystack.len() {
            Naive(NaiveSearcher::new())
        } else {
            let searcher = TwoWaySearcher::new(needle);
            if searcher.memory == uint::MAX { // If the period is long
                TwoWayLong(searcher)
            } else {
                TwoWay(searcher)
            }
        }
    }
}
/// An iterator over the start and end indices of the matches of a
/// substring within a larger string
#[deriving(Clone)]
pub struct MatchIndices<'a> {
    // constants
    haystack: &'a str,
    needle: &'a str,
    /// The algorithm chosen by `Searcher::new` for this haystack/needle pair
    searcher: Searcher
}
/// An iterator over the substrings of a string separated by a given
/// search string
#[deriving(Clone)]
pub struct StrSplits<'a> {
    it: MatchIndices<'a>,
    /// Byte index just past the previous separator match
    last_end: uint,
    finished: bool
}
impl<'a> Iterator<(uint, uint)> for MatchIndices<'a> {
    #[inline]
    fn next(&mut self) -> Option<(uint, uint)> {
        // Dispatch to whichever search state was selected up front.
        match self.searcher {
            Naive(ref mut searcher)
                => searcher.next(self.haystack.as_bytes(), self.needle.as_bytes()),
            TwoWay(ref mut searcher)
                => searcher.next(self.haystack.as_bytes(), self.needle.as_bytes(), false),
            TwoWayLong(ref mut searcher)
                => searcher.next(self.haystack.as_bytes(), self.needle.as_bytes(), true)
        }
    }
}
impl<'a> Iterator<&'a str> for StrSplits<'a> {
    #[inline]
    fn next(&mut self) -> Option<&'a str> {
        if self.finished { return None; }
        // Each fragment spans from the end of the previous match to the
        // start of the next; the final fragment runs to the end of haystack.
        match self.it.next() {
            Some((from, to)) => {
                let ret = Some(self.it.haystack.slice(self.last_end, from));
                self.last_end = to;
                ret
            }
            None => {
                self.finished = true;
                Some(self.it.haystack.slice(self.last_end, self.it.haystack.len()))
            }
        }
    }
}
/// External iterator for a string's UTF16 codeunits.
/// Use with the `std::iter` module.
#[deriving(Clone)]
pub struct Utf16CodeUnits<'a> {
    chars: Chars<'a>,
    /// Pending trailing surrogate left over when a char encoded to two units
    extra: u16
}
impl<'a> Iterator<u16> for Utf16CodeUnits<'a> {
    #[inline]
    fn next(&mut self) -> Option<u16> {
        // Flush a buffered trailing surrogate before decoding another char.
        if self.extra != 0 {
            let tmp = self.extra;
            self.extra = 0;
            return Some(tmp);
        }
        let mut buf = [0u16, ..2];
        self.chars.next().map(|ch| {
            // Encode into up to two UTF-16 units; stash the second one (a
            // trailing surrogate, never 0) for the next call.
            let n = ch.encode_utf16(buf[mut]).unwrap_or(0);
            if n == 2 { self.extra = buf[1]; }
            buf[0]
        })
    }
    #[inline]
    fn size_hint(&self) -> (uint, Option<uint>) {
        let (low, high) = self.chars.size_hint();
        // every char gets either one u16 or two u16,
        // so this iterator is between 1 or 2 times as
        // long as the underlying iterator.
        (low, high.and_then(|n| n.checked_mul(&2)))
    }
}
/*
Section: Comparing strings
*/
// share the implementation of the lang-item vs. non-lang-item
// eq_slice.
/// Bytewise string-slice equality via libc `memcmp`, after a cheap length
/// check.
/// NOTE: This function is (ab)used in rustc::middle::trans::_match
/// to compare &[u8] byte slices that are not necessarily valid UTF-8.
#[inline]
fn eq_slice_(a: &str, b: &str) -> bool {
    #[allow(improper_ctypes)]
    extern { fn memcmp(s1: *const i8, s2: *const i8, n: uint) -> i32; }
    // Short-circuit on length so memcmp only ever reads within both slices.
    a.len() == b.len() && unsafe {
        memcmp(a.as_ptr() as *const i8,
               b.as_ptr() as *const i8,
               a.len()) == 0
    }
}
/// Bytewise slice equality
/// NOTE: This function is (ab)used in rustc::middle::trans::_match
/// to compare &[u8] byte slices that are not necessarily valid UTF-8.
#[lang="str_eq"]
#[inline]
pub fn eq_slice(a: &str, b: &str) -> bool {
    eq_slice_(a, b)
}
/*
Section: Misc
*/
/// Walk through `iter` checking that it's a valid UTF-8 sequence,
/// returning `true` in that case, or, if it is invalid, `false` with
/// `iter` reset such that it is pointing at the first byte in the
/// invalid sequence.
#[inline(always)]
fn run_utf8_validation_iterator(iter: &mut slice::Items<u8>) -> bool {
    loop {
        // save the current thing we're pointing at.
        let old = *iter;
        // restore the iterator we had at the start of this codepoint.
        macro_rules! err ( () => { {*iter = old; return false} });
        macro_rules! next ( () => {
                match iter.next() {
                    Some(a) => *a,
                    // we needed data, but there was none: error!
                    None => err!()
                }
            });
        let first = match iter.next() {
            Some(&b) => b,
            // we're at the end of the iterator and a codepoint
            // boundary at the same time, so this string is valid.
            None => return true
        };
        // ASCII characters are always valid, so only large
        // bytes need more examination.
        if first >= 128 {
            let w = utf8_char_width(first);
            let second = next!();
            // 2-byte encoding is for codepoints \u0080 to \u07ff
            // first C2 80 last DF BF
            // 3-byte encoding is for codepoints \u0800 to \uffff
            // first E0 A0 80 last EF BF BF
            // excluding surrogates codepoints \ud800 to \udfff
            // ED A0 80 to ED BF BF
            // 4-byte encoding is for codepoints \u10000 to \u10ffff
            // first F0 90 80 80 last F4 8F BF BF
            //
            // Use the UTF-8 syntax from the RFC
            //
            // https://tools.ietf.org/html/rfc3629
            // UTF8-1 = %x00-7F
            // UTF8-2 = %xC2-DF UTF8-tail
            // UTF8-3 = %xE0 %xA0-BF UTF8-tail / %xE1-EC 2( UTF8-tail ) /
            // %xED %x80-9F UTF8-tail / %xEE-EF 2( UTF8-tail )
            // UTF8-4 = %xF0 %x90-BF 2( UTF8-tail ) / %xF1-F3 3( UTF8-tail ) /
            // %xF4 %x80-8F 2( UTF8-tail )
            // The arms below are a direct transcription of the RFC grammar;
            // they also reject overlong encodings and surrogates.
            match w {
                2 => if second & !CONT_MASK != TAG_CONT_U8 {err!()},
                3 => {
                    match (first, second, next!() & !CONT_MASK) {
                        (0xE0 , 0xA0 ... 0xBF, TAG_CONT_U8) |
                        (0xE1 ... 0xEC, 0x80 ... 0xBF, TAG_CONT_U8) |
                        (0xED , 0x80 ... 0x9F, TAG_CONT_U8) |
                        (0xEE ... 0xEF, 0x80 ... 0xBF, TAG_CONT_U8) => {}
                        _ => err!()
                    }
                }
                4 => {
                    match (first, second, next!() & !CONT_MASK, next!() & !CONT_MASK) {
                        (0xF0 , 0x90 ... 0xBF, TAG_CONT_U8, TAG_CONT_U8) |
                        (0xF1 ... 0xF3, 0x80 ... 0xBF, TAG_CONT_U8, TAG_CONT_U8) |
                        (0xF4 , 0x80 ... 0x8F, TAG_CONT_U8, TAG_CONT_U8) => {}
                        _ => err!()
                    }
                }
                // Width 0 (bare continuation byte, C0/C1, F5..FF) is invalid.
                _ => err!()
            }
        }
    }
}
/// Determines if a vector of bytes contains valid UTF-8.
pub fn is_utf8(v: &[u8]) -> bool {
    run_utf8_validation_iterator(&mut v.iter())
}
/// Determines if a vector of `u16` contains valid UTF-16
pub fn is_utf16(v: &[u16]) -> bool {
    let mut it = v.iter();
    macro_rules! next ( ($ret:expr) => {
            match it.next() { Some(u) => *u, None => return $ret }
        }
    )
    loop {
        let u = next!(true);
        // `char::from_u32` yields None exactly for surrogate code units
        // (0xD800-0xDFFF); anything else is a valid lone unit.
        match char::from_u32(u as u32) {
            Some(_) => {}
            None => {
                // `u` is a surrogate: it must be a leading one and must be
                // followed by a trailing surrogate.
                // NOTE(review): `u < 0xD7FF` can never be true here (u is
                // already known to be >= 0xD800), so only the `u > 0xDBFF`
                // test does work — the condition is harmless but partly dead.
                let u2 = next!(false);
                if u < 0xD7FF || u > 0xDBFF ||
                    u2 < 0xDC00 || u2 > 0xDFFF { return false; }
            }
        }
    }
}
/// An iterator that decodes UTF-16 encoded codepoints from a vector
/// of `u16`s.
#[deriving(Clone)]
pub struct Utf16Items<'a> {
    iter: slice::Items<'a, u16>
}
/// The possibilities for values decoded from a `u16` stream.
#[deriving(PartialEq, Eq, Clone, Show)]
pub enum Utf16Item {
    /// A valid codepoint.
    ScalarValue(char),
    /// An invalid surrogate without its pair.
    LoneSurrogate(u16)
}
impl Utf16Item {
    /// Convert `self` to a `char`, taking `LoneSurrogate`s to the
    /// replacement character (U+FFFD).
    #[inline]
    pub fn to_char_lossy(&self) -> char {
        match *self {
            ScalarValue(c) => c,
            LoneSurrogate(_) => '\uFFFD'
        }
    }
}
impl<'a> Iterator<Utf16Item> for Utf16Items<'a> {
    fn next(&mut self) -> Option<Utf16Item> {
        let u = match self.iter.next() {
            Some(u) => *u,
            None => return None
        };
        if u < 0xD800 || 0xDFFF < u {
            // not a surrogate
            Some(ScalarValue(unsafe {mem::transmute(u as u32)}))
        } else if u >= 0xDC00 {
            // a trailing surrogate
            Some(LoneSurrogate(u))
        } else {
            // `u` is a leading surrogate (0xD800-0xDBFF); it needs a
            // trailing partner.
            // preserve state for rewinding.
            let old = self.iter;
            let u2 = match self.iter.next() {
                Some(u2) => *u2,
                // eof
                None => return Some(LoneSurrogate(u))
            };
            if u2 < 0xDC00 || u2 > 0xDFFF {
                // not a trailing surrogate so we're not a valid
                // surrogate pair, so rewind to redecode u2 next time.
                self.iter = old;
                return Some(LoneSurrogate(u))
            }
            // all ok, so lets decode it.
            let c = ((u - 0xD800) as u32 << 10 | (u2 - 0xDC00) as u32) + 0x1_0000;
            Some(ScalarValue(unsafe {mem::transmute(c)}))
        }
    }
    #[inline]
    fn size_hint(&self) -> (uint, Option<uint>) {
        let (low, high) = self.iter.size_hint();
        // we could be entirely valid surrogates (2 elements per
        // char), or entirely non-surrogates (1 element per char)
        (low / 2, high)
    }
}
/// Create an iterator over the UTF-16 encoded codepoints in `v`,
/// returning invalid surrogates as `LoneSurrogate`s.
///
/// # Example
///
/// ```rust
/// use std::str;
/// use std::str::{ScalarValue, LoneSurrogate};
///
/// // 𝄞mus<invalid>ic<invalid>
/// let v = [0xD834, 0xDD1E, 0x006d, 0x0075,
/// 0x0073, 0xDD1E, 0x0069, 0x0063,
/// 0xD834];
///
/// assert_eq!(str::utf16_items(v).collect::<Vec<_>>(),
/// vec![ScalarValue('𝄞'),
/// ScalarValue('m'), ScalarValue('u'), ScalarValue('s'),
/// LoneSurrogate(0xDD1E),
/// ScalarValue('i'), ScalarValue('c'),
/// LoneSurrogate(0xD834)]);
/// ```
pub fn utf16_items<'a>(v: &'a [u16]) -> Utf16Items<'a> {
    // Thin constructor: all decoding logic lives in Utf16Items::next.
    Utf16Items { iter : v.iter() }
}
/// Return a slice of `v` ending at (and not including) the first NUL
/// (0).
///
/// # Example
///
/// ```rust
/// use std::str;
///
/// // "abcd"
/// let mut v = ['a' as u16, 'b' as u16, 'c' as u16, 'd' as u16];
/// // no NULs so no change
/// assert_eq!(str::truncate_utf16_at_nul(v), v.as_slice());
///
/// // "ab\0d"
/// v[2] = 0;
/// let b: &[_] = &['a' as u16, 'b' as u16];
/// assert_eq!(str::truncate_utf16_at_nul(v), b);
/// ```
pub fn truncate_utf16_at_nul<'a>(v: &'a [u16]) -> &'a [u16] {
    // Linear scan for the first 0 unit; the whole slice is returned
    // unchanged when none is present.
    match v.iter().position(|c| *c == 0) {
        // don't include the 0
        Some(i) => v[..i],
        None => v
    }
}
// https://tools.ietf.org/html/rfc3629
// Lookup table mapping a leading byte to its sequence length: 1 for ASCII,
// 2-4 for valid multibyte leads, and 0 for bytes that can never start a
// sequence (continuation bytes 0x80-0xBF, overlong leads C0/C1, and F5-FF).
static UTF8_CHAR_WIDTH: [u8, ..256] = [
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x1F
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x3F
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x5F
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, // 0x7F
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, // 0x9F
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, // 0xBF
0,0,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, // 0xDF
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3, // 0xEF
4,4,4,4,4,0,0,0,0,0,0,0,0,0,0,0, // 0xFF
];
/// Given a first byte, determine how many bytes are in this UTF-8 character
#[inline]
pub fn utf8_char_width(b: u8) -> uint {
    // Table lookup; returns 0 for bytes that cannot begin a sequence.
    return UTF8_CHAR_WIDTH[b as uint] as uint;
}
/// Struct that contains a `char` and the index of the first byte of
/// the next `char` in a string. This can be used as a data structure
/// for iterating over the UTF-8 bytes of a string.
pub struct CharRange {
    /// Current `char`
    pub ch: char,
    /// Index of the first byte of the next `char`
    pub next: uint,
}
/// Mask of the value bits of a continuation byte
/// (a continuation byte is 0b10xx_xxxx; these are the payload bits)
const CONT_MASK: u8 = 0b0011_1111u8;
/// Value of the tag bits (tag mask is !CONT_MASK) of a continuation byte
const TAG_CONT_U8: u8 = 0b1000_0000u8;
/// Unsafe operations
pub mod raw {
    use mem;
    use ptr::RawPtr;
    use raw::Slice;
    use slice::{ImmutableSlice};
    use str::{is_utf8, StrSlice};
    /// Converts a slice of bytes to a string slice without checking
    /// that the string contains valid UTF-8.
    /// The caller must guarantee `v` is well-formed UTF-8; `&str` and
    /// `&[u8]` share the same representation, so this is a no-op cast.
    pub unsafe fn from_utf8<'a>(v: &'a [u8]) -> &'a str {
        mem::transmute(v)
    }
    /// Form a slice from a C string. Unsafe because the caller must ensure the
    /// C string has the static lifetime, or else the return value may be
    /// invalidated later.
    pub unsafe fn c_str_to_static_slice(s: *const i8) -> &'static str {
        let s = s as *const u8;
        let mut curr = s;
        let mut len = 0u;
        // Scan for the NUL terminator to measure the string (strlen).
        while *curr != 0u8 {
            len += 1u;
            curr = s.offset(len as int);
        }
        let v = Slice { data: s, len: len };
        // Validate before handing out a &str; invalid UTF-8 would break the
        // str invariant relied on throughout this module.
        assert!(is_utf8(::mem::transmute(v)));
        ::mem::transmute(v)
    }
    /// Takes a bytewise (not UTF-8) slice from a string.
    ///
    /// Returns the substring from [`begin`..`end`).
    ///
    /// # Failure
    ///
    /// If begin is greater than end.
    /// If end is greater than the length of the string.
    #[inline]
    pub unsafe fn slice_bytes<'a>(s: &'a str, begin: uint, end: uint) -> &'a str {
        assert!(begin <= end);
        assert!(end <= s.len());
        slice_unchecked(s, begin, end)
    }
    /// Takes a bytewise (not UTF-8) slice from a string.
    ///
    /// Returns the substring from [`begin`..`end`).
    ///
    /// Caller must check slice boundaries!
    /// (Both bounds checks and char-boundary checks are skipped.)
    #[inline]
    pub unsafe fn slice_unchecked<'a>(s: &'a str, begin: uint, end: uint) -> &'a str {
        mem::transmute(Slice {
            data: s.as_ptr().offset(begin as int),
            len: end - begin,
        })
    }
}
/*
Section: Trait implementations
*/
#[allow(missing_docs)]
pub mod traits {
    use cmp::{Ord, Ordering, Less, Equal, Greater, PartialEq, PartialOrd, Equiv, Eq};
    use iter::Iterator;
    use option::{Option, Some};
    use ops;
    use str::{Str, StrSlice, eq_slice};
    // Lexicographic, bytewise ordering. For UTF-8 this coincides with
    // ordering by code point.
    impl<'a> Ord for &'a str {
        #[inline]
        fn cmp(&self, other: & &'a str) -> Ordering {
            for (s_b, o_b) in self.bytes().zip(other.bytes()) {
                match s_b.cmp(&o_b) {
                    Greater => return Greater,
                    Less => return Less,
                    Equal => ()
                }
            }
            // All compared bytes equal: the shorter string sorts first.
            self.len().cmp(&other.len())
        }
    }
    impl<'a> PartialEq for &'a str {
        #[inline]
        fn eq(&self, other: & &'a str) -> bool {
            eq_slice((*self), (*other))
        }
        #[inline]
        fn ne(&self, other: & &'a str) -> bool { !(*self).eq(other) }
    }
    impl<'a> Eq for &'a str {}
    impl<'a> PartialOrd for &'a str {
        #[inline]
        fn partial_cmp(&self, other: &&'a str) -> Option<Ordering> {
            // Total order exists, so delegate to Ord.
            Some(self.cmp(other))
        }
    }
    impl<S: Str> Equiv<S> for str {
        #[inline]
        fn equiv(&self, other: &S) -> bool { eq_slice(self, other.as_slice()) }
    }
    // Support for the `s[]`, `s[a..]`, `s[..b]`, `s[a..b]` sugar.
    // These delegate to the StrSlice methods, which fail on out-of-bounds
    // or non-char-boundary indices.
    impl ops::Slice<uint, str> for str {
        #[inline]
        fn as_slice_<'a>(&'a self) -> &'a str {
            self
        }
        #[inline]
        fn slice_from_or_fail<'a>(&'a self, from: &uint) -> &'a str {
            self.slice_from(*from)
        }
        #[inline]
        fn slice_to_or_fail<'a>(&'a self, to: &uint) -> &'a str {
            self.slice_to(*to)
        }
        #[inline]
        fn slice_or_fail<'a>(&'a self, from: &uint, to: &uint) -> &'a str {
            self.slice(*from, *to)
        }
    }
}
/// Any string that can be represented as a slice
pub trait Str {
    /// Work with `self` as a slice.
    fn as_slice<'a>(&'a self) -> &'a str;
}
// A &str is trivially its own slice.
impl<'a> Str for &'a str {
    #[inline]
    fn as_slice<'a>(&'a self) -> &'a str { *self }
}
/// Methods for string slices
pub trait StrSlice for Sized? {
/// Returns true if one string contains another
///
/// # Arguments
///
/// - needle - The string to look for
///
/// # Example
///
/// ```rust
/// assert!("bananas".contains("nana"));
/// ```
fn contains(&self, needle: &str) -> bool;
/// Returns true if a string contains a char.
///
/// # Arguments
///
/// - needle - The char to look for
///
/// # Example
///
/// ```rust
/// assert!("hello".contains_char('e'));
/// ```
fn contains_char(&self, needle: char) -> bool;
/// An iterator over the characters of `self`. Note, this iterates
/// over Unicode code-points, not Unicode graphemes.
///
/// # Example
///
/// ```rust
/// let v: Vec<char> = "abc åäö".chars().collect();
/// assert_eq!(v, vec!['a', 'b', 'c', ' ', 'å', 'ä', 'ö']);
/// ```
fn chars<'a>(&'a self) -> Chars<'a>;
/// An iterator over the bytes of `self`
///
/// # Example
///
/// ```rust
/// let v: Vec<u8> = "bors".bytes().collect();
/// assert_eq!(v, b"bors".to_vec());
/// ```
fn bytes<'a>(&'a self) -> Bytes<'a>;
/// An iterator over the characters of `self` and their byte offsets.
fn char_indices<'a>(&'a self) -> CharOffsets<'a>;
/// An iterator over substrings of `self`, separated by characters
/// matched by `sep`.
///
/// # Example
///
/// ```rust
/// let v: Vec<&str> = "Mary had a little lamb".split(' ').collect();
/// assert_eq!(v, vec!["Mary", "had", "a", "little", "lamb"]);
///
/// let v: Vec<&str> = "abc1def2ghi".split(|c: char| c.is_digit()).collect();
/// assert_eq!(v, vec!["abc", "def", "ghi"]);
///
/// let v: Vec<&str> = "lionXXtigerXleopard".split('X').collect();
/// assert_eq!(v, vec!["lion", "", "tiger", "leopard"]);
///
/// let v: Vec<&str> = "".split('X').collect();
/// assert_eq!(v, vec![""]);
/// ```
fn split<'a, Sep: CharEq>(&'a self, sep: Sep) -> CharSplits<'a, Sep>;
/// An iterator over substrings of `self`, separated by characters
/// matched by `sep`, restricted to splitting at most `count`
/// times.
///
/// # Example
///
/// ```rust
/// let v: Vec<&str> = "Mary had a little lambda".splitn(2, ' ').collect();
/// assert_eq!(v, vec!["Mary", "had", "a little lambda"]);
///
/// let v: Vec<&str> = "abc1def2ghi".splitn(1, |c: char| c.is_digit()).collect();
/// assert_eq!(v, vec!["abc", "def2ghi"]);
///
/// let v: Vec<&str> = "lionXXtigerXleopard".splitn(2, 'X').collect();
/// assert_eq!(v, vec!["lion", "", "tigerXleopard"]);
///
/// let v: Vec<&str> = "abcXdef".splitn(0, 'X').collect();
/// assert_eq!(v, vec!["abcXdef"]);
///
/// let v: Vec<&str> = "".splitn(1, 'X').collect();
/// assert_eq!(v, vec![""]);
/// ```
fn splitn<'a, Sep: CharEq>(&'a self, count: uint, sep: Sep) -> CharSplitsN<'a, Sep>;
/// An iterator over substrings of `self`, separated by characters
/// matched by `sep`.
///
/// Equivalent to `split`, except that the trailing substring
/// is skipped if empty (terminator semantics).
///
/// # Example
///
/// ```rust
/// let v: Vec<&str> = "A.B.".split_terminator('.').collect();
/// assert_eq!(v, vec!["A", "B"]);
///
/// let v: Vec<&str> = "A..B..".split_terminator('.').collect();
/// assert_eq!(v, vec!["A", "", "B", ""]);
///
/// let v: Vec<&str> = "Mary had a little lamb".split(' ').rev().collect();
/// assert_eq!(v, vec!["lamb", "little", "a", "had", "Mary"]);
///
/// let v: Vec<&str> = "abc1def2ghi".split(|c: char| c.is_digit()).rev().collect();
/// assert_eq!(v, vec!["ghi", "def", "abc"]);
///
/// let v: Vec<&str> = "lionXXtigerXleopard".split('X').rev().collect();
/// assert_eq!(v, vec!["leopard", "tiger", "", "lion"]);
/// ```
fn split_terminator<'a, Sep: CharEq>(&'a self, sep: Sep) -> CharSplits<'a, Sep>;
/// An iterator over substrings of `self`, separated by characters
/// matched by `sep`, starting from the end of the string.
/// Restricted to splitting at most `count` times.
///
/// # Example
///
/// ```rust
/// let v: Vec<&str> = "Mary had a little lamb".rsplitn(2, ' ').collect();
/// assert_eq!(v, vec!["lamb", "little", "Mary had a"]);
///
/// let v: Vec<&str> = "abc1def2ghi".rsplitn(1, |c: char| c.is_digit()).collect();
/// assert_eq!(v, vec!["ghi", "abc1def"]);
///
/// let v: Vec<&str> = "lionXXtigerXleopard".rsplitn(2, 'X').collect();
/// assert_eq!(v, vec!["leopard", "tiger", "lionX"]);
/// ```
fn rsplitn<'a, Sep: CharEq>(&'a self, count: uint, sep: Sep) -> CharSplitsN<'a, Sep>;
/// An iterator over the start and end indices of the disjoint
/// matches of `sep` within `self`.
///
/// That is, each returned value `(start, end)` satisfies
/// `self.slice(start, end) == sep`. For matches of `sep` within
/// `self` that overlap, only the indices corresponding to the
/// first match are returned.
///
/// # Example
///
/// ```rust
/// let v: Vec<(uint, uint)> = "abcXXXabcYYYabc".match_indices("abc").collect();
/// assert_eq!(v, vec![(0,3), (6,9), (12,15)]);
///
/// let v: Vec<(uint, uint)> = "1abcabc2".match_indices("abc").collect();
/// assert_eq!(v, vec![(1,4), (4,7)]);
///
/// let v: Vec<(uint, uint)> = "ababa".match_indices("aba").collect();
/// assert_eq!(v, vec![(0, 3)]); // only the first `aba`
/// ```
fn match_indices<'a>(&'a self, sep: &'a str) -> MatchIndices<'a>;
/// An iterator over the substrings of `self` separated by `sep`.
///
/// # Example
///
/// ```rust
/// let v: Vec<&str> = "abcXXXabcYYYabc".split_str("abc").collect();
/// assert_eq!(v, vec!["", "XXX", "YYY", ""]);
///
/// let v: Vec<&str> = "1abcabc2".split_str("abc").collect();
/// assert_eq!(v, vec!["1", "", "2"]);
/// ```
fn split_str<'a>(&'a self, &'a str) -> StrSplits<'a>;
/// An iterator over the lines of a string (subsequences separated
/// by `\n`). This does not include the empty string after a
/// trailing `\n`.
///
/// # Example
///
/// ```rust
/// let four_lines = "foo\nbar\n\nbaz\n";
/// let v: Vec<&str> = four_lines.lines().collect();
/// assert_eq!(v, vec!["foo", "bar", "", "baz"]);
/// ```
fn lines<'a>(&'a self) -> CharSplits<'a, char>;
/// An iterator over the lines of a string, separated by either
/// `\n` or `\r\n`. As with `.lines()`, this does not include an
/// empty trailing line.
///
/// # Example
///
/// ```rust
/// let four_lines = "foo\r\nbar\n\r\nbaz\n";
/// let v: Vec<&str> = four_lines.lines_any().collect();
/// assert_eq!(v, vec!["foo", "bar", "", "baz"]);
/// ```
fn lines_any<'a>(&'a self) -> AnyLines<'a>;
/// Returns the number of Unicode code points (`char`) that a
/// string holds.
///
/// This does not perform any normalization, and is `O(n)`, since
/// UTF-8 is a variable width encoding of code points.
///
/// *Warning*: The number of code points in a string does not directly
/// correspond to the number of visible characters or width of the
/// visible text due to composing characters, and double- and
/// zero-width ones.
///
/// See also `.len()` for the byte length.
///
/// # Example
///
/// ```rust
/// // composed forms of `ö` and `é`
/// let c = "Löwe 老虎 Léopard"; // German, Simplified Chinese, French
/// // decomposed forms of `ö` and `é`
/// let d = "Lo\u0308we 老虎 Le\u0301opard";
///
/// assert_eq!(c.char_len(), 15);
/// assert_eq!(d.char_len(), 17);
///
/// assert_eq!(c.len(), 21);
/// assert_eq!(d.len(), 23);
///
/// // the two strings *look* the same
/// println!("{}", c);
/// println!("{}", d);
/// ```
fn char_len(&self) -> uint;
/// Returns a slice of the given string from the byte range
/// [`begin`..`end`).
///
/// This operation is `O(1)`.
///
/// Fails when `begin` and `end` do not point to valid characters
/// or point beyond the last character of the string.
///
/// See also `slice_to` and `slice_from` for slicing prefixes and
/// suffixes of strings, and `slice_chars` for slicing based on
/// code point counts.
///
/// # Example
///
/// ```rust
/// let s = "Löwe 老虎 Léopard";
/// assert_eq!(s.slice(0, 1), "L");
///
/// assert_eq!(s.slice(1, 9), "öwe 老");
///
/// // these will panic:
/// // byte 2 lies within `ö`:
/// // s.slice(2, 3);
///
/// // byte 8 lies within `老`
/// // s.slice(1, 8);
///
/// // byte 100 is outside the string
/// // s.slice(3, 100);
/// ```
fn slice<'a>(&'a self, begin: uint, end: uint) -> &'a str;
/// Returns a slice of the string from `begin` to its end.
///
/// Equivalent to `self.slice(begin, self.len())`.
///
/// Fails when `begin` does not point to a valid character, or is
/// out of bounds.
///
/// See also `slice`, `slice_to` and `slice_chars`.
fn slice_from<'a>(&'a self, begin: uint) -> &'a str;
/// Returns a slice of the string from the beginning to byte
/// `end`.
///
/// Equivalent to `self.slice(0, end)`.
///
/// Fails when `end` does not point to a valid character, or is
/// out of bounds.
///
/// See also `slice`, `slice_from` and `slice_chars`.
fn slice_to<'a>(&'a self, end: uint) -> &'a str;
/// Returns a slice of the string from the character range
/// [`begin`..`end`).
///
/// That is, start at the `begin`-th code point of the string and
/// continue to the `end`-th code point. This does not detect or
/// handle edge cases such as leaving a combining character as the
/// first code point of the string.
///
/// Due to the design of UTF-8, this operation is `O(end)`.
/// See `slice`, `slice_to` and `slice_from` for `O(1)`
/// variants that use byte indices rather than code point
/// indices.
///
/// Fails if `begin` > `end` or the either `begin` or `end` are
/// beyond the last character of the string.
///
/// # Example
///
/// ```rust
/// let s = "Löwe 老虎 Léopard";
/// assert_eq!(s.slice_chars(0, 4), "Löwe");
/// assert_eq!(s.slice_chars(5, 7), "老虎");
/// ```
fn slice_chars<'a>(&'a self, begin: uint, end: uint) -> &'a str;
/// Returns true if `needle` is a prefix of the string.
///
/// # Example
///
/// ```rust
/// assert!("banana".starts_with("ba"));
/// ```
fn starts_with(&self, needle: &str) -> bool;
/// Returns true if `needle` is a suffix of the string.
///
/// # Example
///
/// ```rust
/// assert!("banana".ends_with("nana"));
/// ```
fn ends_with(&self, needle: &str) -> bool;
/// Returns a string with characters that match `to_trim` removed.
///
/// # Arguments
///
/// * to_trim - a character matcher
///
/// # Example
///
/// ```rust
/// assert_eq!("11foo1bar11".trim_chars('1'), "foo1bar")
/// let x: &[_] = &['1', '2'];
/// assert_eq!("12foo1bar12".trim_chars(x), "foo1bar")
/// assert_eq!("123foo1bar123".trim_chars(|c: char| c.is_digit()), "foo1bar")
/// ```
fn trim_chars<'a, C: CharEq>(&'a self, to_trim: C) -> &'a str;
/// Returns a string with leading `chars_to_trim` removed.
///
/// # Arguments
///
/// * to_trim - a character matcher
///
/// # Example
///
/// ```rust
/// assert_eq!("11foo1bar11".trim_left_chars('1'), "foo1bar11")
/// let x: &[_] = &['1', '2'];
/// assert_eq!("12foo1bar12".trim_left_chars(x), "foo1bar12")
/// assert_eq!("123foo1bar123".trim_left_chars(|c: char| c.is_digit()), "foo1bar123")
/// ```
fn trim_left_chars<'a, C: CharEq>(&'a self, to_trim: C) -> &'a str;
/// Returns a string with trailing `chars_to_trim` removed.
///
/// # Arguments
///
/// * to_trim - a character matcher
///
/// # Example
///
/// ```rust
/// assert_eq!("11foo1bar11".trim_right_chars('1'), "11foo1bar")
/// let x: &[_] = &['1', '2'];
/// assert_eq!("12foo1bar12".trim_right_chars(x), "12foo1bar")
/// assert_eq!("123foo1bar123".trim_right_chars(|c: char| c.is_digit()), "123foo1bar")
/// ```
fn trim_right_chars<'a, C: CharEq>(&'a self, to_trim: C) -> &'a str;
/// Check that `index`-th byte lies at the start and/or end of a
/// UTF-8 code point sequence.
///
/// The start and end of the string (when `index == self.len()`)
/// are considered to be boundaries.
///
/// Fails if `index` is greater than `self.len()`.
///
/// # Example
///
/// ```rust
/// let s = "Löwe 老虎 Léopard";
/// assert!(s.is_char_boundary(0));
/// // start of `老`
/// assert!(s.is_char_boundary(6));
/// assert!(s.is_char_boundary(s.len()));
///
/// // second byte of `ö`
/// assert!(!s.is_char_boundary(2));
///
/// // third byte of `老`
/// assert!(!s.is_char_boundary(8));
/// ```
fn is_char_boundary(&self, index: uint) -> bool;
/// Pluck a character out of a string and return the index of the next
/// character.
///
/// This function can be used to iterate over the Unicode characters of a
/// string.
///
/// # Example
///
/// This example manually iterates through the characters of a
/// string; this should normally be done by `.chars()` or
/// `.char_indices`.
///
/// ```rust
/// use std::str::CharRange;
///
/// let s = "中华Việt Nam";
/// let mut i = 0u;
/// while i < s.len() {
/// let CharRange {ch, next} = s.char_range_at(i);
/// println!("{}: {}", i, ch);
/// i = next;
/// }
/// ```
///
/// ## Output
///
/// ```ignore
/// 0: 中
/// 3: 华
/// 6: V
/// 7: i
/// 8: ệ
/// 11: t
/// 12:
/// 13: N
/// 14: a
/// 15: m
/// ```
///
/// # Arguments
///
/// * s - The string
/// * i - The byte offset of the char to extract
///
/// # Return value
///
/// A record {ch: char, next: uint} containing the char value and the byte
/// index of the next Unicode character.
///
/// # Failure
///
/// If `i` is greater than or equal to the length of the string.
/// If `i` is not the index of the beginning of a valid UTF-8 character.
fn char_range_at(&self, start: uint) -> CharRange;
/// Given a byte position and a str, return the previous char and its position.
///
/// This function can be used to iterate over a Unicode string in reverse.
///
/// Returns 0 for next index if called on start index 0.
///
/// # Failure
///
/// If `i` is greater than the length of the string.
/// If `i` is not an index following a valid UTF-8 character.
fn char_range_at_reverse(&self, start: uint) -> CharRange;
/// Plucks the character starting at the `i`th byte of a string.
///
/// # Example
///
/// ```rust
/// let s = "abπc";
/// assert_eq!(s.char_at(1), 'b');
/// assert_eq!(s.char_at(2), 'π');
/// assert_eq!(s.char_at(4), 'c');
/// ```
///
/// # Failure
///
/// If `i` is greater than or equal to the length of the string.
/// If `i` is not the index of the beginning of a valid UTF-8 character.
fn char_at(&self, i: uint) -> char;
/// Plucks the character ending at the `i`th byte of a string.
///
/// # Failure
///
/// If `i` is greater than the length of the string.
/// If `i` is not an index following a valid UTF-8 character.
fn char_at_reverse(&self, i: uint) -> char;
/// Work with the byte buffer of a string as a byte slice.
///
/// # Example
///
/// ```rust
/// assert_eq!("bors".as_bytes(), b"bors");
/// ```
fn as_bytes<'a>(&'a self) -> &'a [u8];
/// Returns the byte index of the first character of `self` that
/// matches `search`.
///
/// # Return value
///
/// `Some` containing the byte index of the last matching character
/// or `None` if there is no match
///
/// # Example
///
/// ```rust
/// let s = "Löwe 老虎 Léopard";
///
/// assert_eq!(s.find('L'), Some(0));
/// assert_eq!(s.find('é'), Some(14));
///
/// // the first space
/// assert_eq!(s.find(|c: char| c.is_whitespace()), Some(5));
///
/// // neither are found
/// let x: &[_] = &['1', '2'];
/// assert_eq!(s.find(x), None);
/// ```
fn find<C: CharEq>(&self, search: C) -> Option<uint>;
/// Returns the byte index of the last character of `self` that
/// matches `search`.
///
/// # Return value
///
/// `Some` containing the byte index of the last matching character
/// or `None` if there is no match.
///
/// # Example
///
/// ```rust
/// let s = "Löwe 老虎 Léopard";
///
/// assert_eq!(s.rfind('L'), Some(13));
/// assert_eq!(s.rfind('é'), Some(14));
///
/// // the second space
/// assert_eq!(s.rfind(|c: char| c.is_whitespace()), Some(12));
///
/// // searches for an occurrence of either `1` or `2`, but neither are found
/// let x: &[_] = &['1', '2'];
/// assert_eq!(s.rfind(x), None);
/// ```
fn rfind<C: CharEq>(&self, search: C) -> Option<uint>;
/// Returns the byte index of the first matching substring
///
/// # Arguments
///
/// * `needle` - The string to search for
///
/// # Return value
///
/// `Some` containing the byte index of the first matching substring
/// or `None` if there is no match.
///
/// # Example
///
/// ```rust
/// let s = "Löwe 老虎 Léopard";
///
/// assert_eq!(s.find_str("老虎 L"), Some(6));
/// assert_eq!(s.find_str("muffin man"), None);
/// ```
fn find_str(&self, &str) -> Option<uint>;
/// Retrieves the first character from a string slice and returns
/// it. This does not allocate a new string; instead, it returns a
/// slice that point one character beyond the character that was
/// shifted. If the string does not contain any characters,
/// a tuple of None and an empty string is returned instead.
///
/// # Example
///
/// ```rust
/// let s = "Löwe 老虎 Léopard";
/// let (c, s1) = s.slice_shift_char();
/// assert_eq!(c, Some('L'));
/// assert_eq!(s1, "öwe 老虎 Léopard");
///
/// let (c, s2) = s1.slice_shift_char();
/// assert_eq!(c, Some('ö'));
/// assert_eq!(s2, "we 老虎 Léopard");
/// ```
fn slice_shift_char<'a>(&'a self) -> (Option<char>, &'a str);
/// Returns the byte offset of an inner slice relative to an enclosing outer slice.
///
/// Fails if `inner` is not a direct slice contained within self.
///
/// # Example
///
/// ```rust
/// let string = "a\nb\nc";
/// let lines: Vec<&str> = string.lines().collect();
/// let lines = lines.as_slice();
///
/// assert!(string.subslice_offset(lines[0]) == 0); // &"a"
/// assert!(string.subslice_offset(lines[1]) == 2); // &"b"
/// assert!(string.subslice_offset(lines[2]) == 4); // &"c"
/// ```
fn subslice_offset(&self, inner: &str) -> uint;
/// Return an unsafe pointer to the strings buffer.
///
/// The caller must ensure that the string outlives this pointer,
/// and that it is not reallocated (e.g. by pushing to the
/// string).
fn as_ptr(&self) -> *const u8;
/// Return an iterator of `u16` over the string encoded as UTF-16.
fn utf16_units<'a>(&'a self) -> Utf16CodeUnits<'a>;
/// Return the number of bytes in this string
///
/// # Example
///
/// ```
/// assert_eq!("foo".len(), 3);
/// assert_eq!("ƒoo".len(), 4);
/// ```
#[experimental = "not triaged yet"]
fn len(&self) -> uint;
/// Returns true if this slice contains no bytes
///
/// # Example
///
/// ```
/// assert!("".is_empty());
/// ```
#[inline]
#[experimental = "not triaged yet"]
fn is_empty(&self) -> bool { self.len() == 0 }
}
/// Abort with a diagnostic when a string slice request is invalid
/// (out of bounds or not on a character boundary).
///
/// Kept out-of-line so the cold failure path does not bloat the hot
/// slicing code that calls it.
#[inline(never)]
fn slice_error_fail(s: &str, begin: uint, end: uint) -> ! {
    assert!(begin <= end);
    panic!(
        "index {} and/or {} in `{}` do not lie on character boundary",
        begin, end, s
    );
}
// Concrete implementation of the `StrSlice` trait for the primitive `str`
// type. Byte-level UTF-8 handling lives here; several methods rely on the
// invariant that `str` always holds valid UTF-8.
impl StrSlice for str {
    #[inline]
    fn contains(&self, needle: &str) -> bool {
        self.find_str(needle).is_some()
    }
    #[inline]
    fn contains_char(&self, needle: char) -> bool {
        self.find(needle).is_some()
    }
    #[inline]
    fn chars(&self) -> Chars {
        // The char iterator decodes UTF-8 on the fly from the byte iterator.
        Chars{iter: self.as_bytes().iter()}
    }
    #[inline]
    fn bytes(&self) -> Bytes {
        self.as_bytes().iter().map(|&b| b)
    }
    #[inline]
    fn char_indices(&self) -> CharOffsets {
        CharOffsets{front_offset: 0, iter: self.chars()}
    }
    #[inline]
    fn split<Sep: CharEq>(&self, sep: Sep) -> CharSplits<Sep> {
        CharSplits {
            string: self,
            // `only_ascii` enables a byte-wise fast path in the iterator.
            only_ascii: sep.only_ascii(),
            sep: sep,
            allow_trailing_empty: true,
            finished: false,
        }
    }
    #[inline]
    fn splitn<Sep: CharEq>(&self, count: uint, sep: Sep)
        -> CharSplitsN<Sep> {
        CharSplitsN {
            iter: self.split(sep),
            count: count,
            invert: false,
        }
    }
    #[inline]
    fn split_terminator<Sep: CharEq>(&self, sep: Sep)
        -> CharSplits<Sep> {
        // Same as `split`, but a trailing empty substring is dropped.
        CharSplits {
            allow_trailing_empty: false,
            ..self.split(sep)
        }
    }
    #[inline]
    fn rsplitn<Sep: CharEq>(&self, count: uint, sep: Sep)
        -> CharSplitsN<Sep> {
        CharSplitsN {
            iter: self.split(sep),
            count: count,
            // `invert` makes the splitter consume from the end of the string.
            invert: true,
        }
    }
    #[inline]
    fn match_indices<'a>(&'a self, sep: &'a str) -> MatchIndices<'a> {
        // An empty needle would match everywhere; it is rejected outright.
        assert!(!sep.is_empty())
        MatchIndices {
            haystack: self,
            needle: sep,
            searcher: Searcher::new(self.as_bytes(), sep.as_bytes())
        }
    }
    #[inline]
    fn split_str<'a>(&'a self, sep: &'a str) -> StrSplits<'a> {
        StrSplits {
            it: self.match_indices(sep),
            last_end: 0,
            finished: false
        }
    }
    #[inline]
    fn lines(&self) -> CharSplits<char> {
        // Terminator semantics: no empty string after a trailing '\n'.
        self.split_terminator('\n')
    }
    fn lines_any(&self) -> AnyLines {
        // Like `lines`, but additionally strips a trailing '\r' so that
        // both "\n" and "\r\n" line endings are handled.
        self.lines().map(|line| {
            let l = line.len();
            if l > 0 && line.as_bytes()[l - 1] == b'\r' { line.slice(0, l - 1) }
            else { line }
        })
    }
    #[inline]
    fn char_len(&self) -> uint { self.chars().count() }
    #[inline]
    fn slice(&self, begin: uint, end: uint) -> &str {
        // is_char_boundary checks that the index is in [0, .len()]
        if begin <= end &&
            self.is_char_boundary(begin) &&
            self.is_char_boundary(end) {
            unsafe { raw::slice_unchecked(self, begin, end) }
        } else {
            slice_error_fail(self, begin, end)
        }
    }
    #[inline]
    fn slice_from(&self, begin: uint) -> &str {
        // is_char_boundary checks that the index is in [0, .len()]
        if self.is_char_boundary(begin) {
            unsafe { raw::slice_unchecked(self, begin, self.len()) }
        } else {
            slice_error_fail(self, begin, self.len())
        }
    }
    #[inline]
    fn slice_to(&self, end: uint) -> &str {
        // is_char_boundary checks that the index is in [0, .len()]
        if self.is_char_boundary(end) {
            unsafe { raw::slice_unchecked(self, 0, end) }
        } else {
            slice_error_fail(self, 0, end)
        }
    }
    // O(end): walks the string decoding chars until both code-point indices
    // have been translated into byte offsets.
    fn slice_chars(&self, begin: uint, end: uint) -> &str {
        assert!(begin <= end);
        let mut count = 0;
        let mut begin_byte = None;
        let mut end_byte = None;
        // This could be even more efficient by not decoding,
        // only finding the char boundaries
        for (idx, _) in self.char_indices() {
            if count == begin { begin_byte = Some(idx); }
            if count == end { end_byte = Some(idx); break; }
            count += 1;
        }
        // `begin`/`end` equal to the char count map to the end of the string.
        if begin_byte.is_none() && count == begin { begin_byte = Some(self.len()) }
        if end_byte.is_none() && count == end { end_byte = Some(self.len()) }
        match (begin_byte, end_byte) {
            (None, _) => panic!("slice_chars: `begin` is beyond end of string"),
            (_, None) => panic!("slice_chars: `end` is beyond end of string"),
            (Some(a), Some(b)) => unsafe { raw::slice_bytes(self, a, b) }
        }
    }
    #[inline]
    fn starts_with(&self, needle: &str) -> bool {
        // Plain byte comparison is valid because both sides are UTF-8.
        let n = needle.len();
        self.len() >= n && needle.as_bytes() == self.as_bytes()[..n]
    }
    #[inline]
    fn ends_with(&self, needle: &str) -> bool {
        let (m, n) = (self.len(), needle.len());
        m >= n && needle.as_bytes() == self.as_bytes()[m-n..]
    }
    #[inline]
    fn trim_chars<C: CharEq>(&self, mut to_trim: C) -> &str {
        // Two passes: first drop the matching prefix, then drop the matching
        // suffix of what remains.
        let cur = match self.find(|c: char| !to_trim.matches(c)) {
            None => "",
            Some(i) => unsafe { raw::slice_bytes(self, i, self.len()) }
        };
        match cur.rfind(|c: char| !to_trim.matches(c)) {
            None => "",
            Some(i) => {
                // `i` is the start of the last kept char; extend to its end.
                let right = cur.char_range_at(i).next;
                unsafe { raw::slice_bytes(cur, 0, right) }
            }
        }
    }
    #[inline]
    fn trim_left_chars<C: CharEq>(&self, mut to_trim: C) -> &str {
        match self.find(|c: char| !to_trim.matches(c)) {
            None => "",
            Some(first) => unsafe { raw::slice_bytes(self, first, self.len()) }
        }
    }
    #[inline]
    fn trim_right_chars<C: CharEq>(&self, mut to_trim: C) -> &str {
        match self.rfind(|c: char| !to_trim.matches(c)) {
            None => "",
            Some(last) => {
                let next = self.char_range_at(last).next;
                unsafe { raw::slice_bytes(self, 0u, next) }
            }
        }
    }
    #[inline]
    fn is_char_boundary(&self, index: uint) -> bool {
        if index == self.len() { return true; }
        match self.as_bytes().get(index) {
            None => false,
            // A boundary byte is ASCII (< 0x80) or a UTF-8 leading byte
            // (>= 0xC0); 0x80..0xBF are continuation bytes.
            Some(&b) => b < 128u8 || b >= 192u8,
        }
    }
    #[inline]
    fn char_range_at(&self, i: uint) -> CharRange {
        // ASCII fast path: one byte, next index is i + 1.
        if self.as_bytes()[i] < 128u8 {
            return CharRange {ch: self.as_bytes()[i] as char, next: i + 1 };
        }
        // Multibyte case is a fn to allow char_range_at to inline cleanly
        fn multibyte_char_range_at(s: &str, i: uint) -> CharRange {
            let mut val = s.as_bytes()[i] as u32;
            // Width (2..4) of the sequence, looked up from the leading byte.
            let w = UTF8_CHAR_WIDTH[val as uint] as uint;
            assert!((w != 0));
            val = utf8_first_byte!(val, w);
            val = utf8_acc_cont_byte!(val, s.as_bytes()[i + 1]);
            if w > 2 { val = utf8_acc_cont_byte!(val, s.as_bytes()[i + 2]); }
            if w > 3 { val = utf8_acc_cont_byte!(val, s.as_bytes()[i + 3]); }
            // SAFETY relies on the str invariant: `val` is a valid scalar.
            return CharRange {ch: unsafe { mem::transmute(val) }, next: i + w};
        }
        return multibyte_char_range_at(self, i);
    }
    #[inline]
    fn char_range_at_reverse(&self, start: uint) -> CharRange {
        let mut prev = start;
        // Step back one byte (clamped at 0) to land inside the previous char.
        prev = prev.saturating_sub(1);
        if self.as_bytes()[prev] < 128 {
            return CharRange{ch: self.as_bytes()[prev] as char, next: prev}
        }
        // Multibyte case is a fn to allow char_range_at_reverse to inline cleanly
        fn multibyte_char_range_at_reverse(s: &str, mut i: uint) -> CharRange {
            // while there is a previous byte == 10......
            while i > 0 && s.as_bytes()[i] & !CONT_MASK == TAG_CONT_U8 {
                i -= 1u;
            }
            let mut val = s.as_bytes()[i] as u32;
            let w = UTF8_CHAR_WIDTH[val as uint] as uint;
            assert!((w != 0));
            val = utf8_first_byte!(val, w);
            val = utf8_acc_cont_byte!(val, s.as_bytes()[i + 1]);
            if w > 2 { val = utf8_acc_cont_byte!(val, s.as_bytes()[i + 2]); }
            if w > 3 { val = utf8_acc_cont_byte!(val, s.as_bytes()[i + 3]); }
            return CharRange {ch: unsafe { mem::transmute(val) }, next: i};
        }
        return multibyte_char_range_at_reverse(self, prev);
    }
    #[inline]
    fn char_at(&self, i: uint) -> char {
        self.char_range_at(i).ch
    }
    #[inline]
    fn char_at_reverse(&self, i: uint) -> char {
        self.char_range_at_reverse(i).ch
    }
    #[inline]
    fn as_bytes(&self) -> &[u8] {
        // Relies on `&str` and `&[u8]` sharing the same representation.
        unsafe { mem::transmute(self) }
    }
    fn find<C: CharEq>(&self, mut search: C) -> Option<uint> {
        if search.only_ascii() {
            // ASCII needle: byte positions are char boundaries, scan bytes.
            self.bytes().position(|b| search.matches(b as char))
        } else {
            for (index, c) in self.char_indices() {
                if search.matches(c) { return Some(index); }
            }
            None
        }
    }
    fn rfind<C: CharEq>(&self, mut search: C) -> Option<uint> {
        if search.only_ascii() {
            self.bytes().rposition(|b| search.matches(b as char))
        } else {
            for (index, c) in self.char_indices().rev() {
                if search.matches(c) { return Some(index); }
            }
            None
        }
    }
    fn find_str(&self, needle: &str) -> Option<uint> {
        // The empty needle matches at offset 0; `match_indices` itself
        // rejects empty needles, so it is special-cased here.
        if needle.is_empty() {
            Some(0)
        } else {
            self.match_indices(needle)
                .next()
                .map(|(start, _end)| start)
        }
    }
    #[inline]
    fn slice_shift_char(&self) -> (Option<char>, &str) {
        if self.is_empty() {
            return (None, self);
        } else {
            let CharRange {ch, next} = self.char_range_at(0u);
            let next_s = unsafe { raw::slice_bytes(self, next, self.len()) };
            return (Some(ch), next_s);
        }
    }
    fn subslice_offset(&self, inner: &str) -> uint {
        // Pure pointer arithmetic; the asserts verify that `inner` really
        // lies inside `self`'s buffer.
        let a_start = self.as_ptr() as uint;
        let a_end = a_start + self.len();
        let b_start = inner.as_ptr() as uint;
        let b_end = b_start + inner.len();
        assert!(a_start <= b_start);
        assert!(b_end <= a_end);
        b_start - a_start
    }
    #[inline]
    fn as_ptr(&self) -> *const u8 {
        self.repr().data
    }
    #[inline]
    fn utf16_units(&self) -> Utf16CodeUnits {
        // `extra` buffers the second unit of a surrogate pair between calls.
        Utf16CodeUnits{ chars: self.chars(), extra: 0}
    }
    #[inline]
    fn len(&self) -> uint { self.repr().len }
}
/// The default value of a string slice is the empty string.
impl<'a> Default for &'a str {
    fn default() -> &'a str {
        ""
    }
}
| 31.627611 | 99 | 0.524668 |
fe374142f3175225697fd10302aa8a11bcc46b1d
| 6,021 |
//! See [Mesh](crate::mesh::Mesh).
use crate::mesh::Mesh;
use crate::mesh::math::*;
use crate::mesh::ids::*;
/// # Transformations
///
/// Rigid and affine transformations applied to the whole mesh or to
/// individual vertices. All of them only rewrite vertex positions; the
/// mesh connectivity is untouched.
impl Mesh
{
    /// Moves the vertex to the specified position.
    pub fn move_vertex_to(&mut self, vertex_id: VertexID, value: Vec3)
    {
        self.connectivity_info.set_position(vertex_id, value);
    }
    /// Moves the vertex by the specified vector, i.e. the new position is `mesh.vertex_position(vertex_id) + value`.
    pub fn move_vertex_by(&mut self, vertex_id: VertexID, value: Vec3)
    {
        let p = value + self.vertex_position(vertex_id);
        self.move_vertex_to(vertex_id, p);
    }
    /// Scales the entire mesh by multiplying `scale` to each vertex position.
    ///
    /// # Examples
    ///
    /// ```
    /// # use tri_mesh::prelude::*;
    /// #
    /// # fn main() -> Result<(), Box<tri_mesh::mesh_builder::Error>> {
    /// let mut mesh = MeshBuilder::new().cube().build()?;
    /// # let first_face_id = mesh.face_iter().next().unwrap();
    /// # let face_area_before = mesh.face_area(first_face_id);
    /// mesh.scale(2.0);
    /// # let face_area_after = mesh.face_area(first_face_id);
    /// # assert_eq!(4.0 * face_area_before, face_area_after);
    /// # mesh.is_valid().unwrap();
    /// # Ok(())
    /// # }
    /// ```
    ///
    pub fn scale(&mut self, scale: f64)
    {
        for vertex_id in self.vertex_iter() {
            let p = self.vertex_position(vertex_id);
            self.move_vertex_to(vertex_id, p * scale);
        }
    }
    /// Scales the entire mesh by multiplying `scale_x` to the x component of each vertex position, `scale_y` to the y component and `scale_z` to the z component.
    ///
    /// # Examples
    ///
    /// ```
    /// # use tri_mesh::prelude::*;
    /// #
    /// # fn main() -> Result<(), Box<tri_mesh::mesh_builder::Error>> {
    /// let mut mesh = MeshBuilder::new().cube().build()?;
    /// # let first_face_id = mesh.face_iter().find(|f| mesh.face_normal(*f) == vec3(0.0, 1.0, 0.0)).unwrap();
    /// # let second_face_id = mesh.face_iter().find(|f| mesh.face_normal(*f) == vec3(1.0, 0.0, 0.0)).unwrap();
    /// # let face_area_before1 = mesh.face_area(first_face_id);
    /// # let face_area_before2 = mesh.face_area(second_face_id);
    /// mesh.non_uniform_scale(2.0, 1.0, 1.0);
    /// # assert_eq!(2.0 * face_area_before1, mesh.face_area(first_face_id));
    /// # assert_eq!(face_area_before2, mesh.face_area(second_face_id));
    /// # mesh.is_valid().unwrap();
    /// # Ok(())
    /// # }
    /// ```
    ///
    pub fn non_uniform_scale(&mut self, scale_x: f64, scale_y: f64, scale_z: f64)
    {
        for vertex_id in self.vertex_iter() {
            let p = self.vertex_position(vertex_id);
            self.move_vertex_to(vertex_id, vec3(p.x * scale_x, p.y * scale_y, p.z * scale_z));
        }
    }
    /// Translates the entire mesh by applying the `translation` to each vertex position.
    ///
    /// # Examples
    ///
    /// ```
    /// # use tri_mesh::prelude::*;
    /// #
    /// # fn main() -> Result<(), Box<tri_mesh::mesh_builder::Error>> {
    /// let mut mesh = MeshBuilder::new().cube().build()?;
    /// # let first_vertex_id = mesh.vertex_iter().next().unwrap();
    /// # let vertex_position_before = mesh.vertex_position(first_vertex_id);
    /// mesh.translate(vec3(2.5, -1.0, 0.0));
    /// # let vertex_position_after = mesh.vertex_position(first_vertex_id);
    /// # assert_eq!(vertex_position_before + vec3(2.5, -1.0, 0.0), vertex_position_after);
    /// # mesh.is_valid().unwrap();
    /// # Ok(())
    /// # }
    /// ```
    ///
    pub fn translate(&mut self, translation: Vec3)
    {
        for vertex_id in self.vertex_iter() {
            self.move_vertex_by(vertex_id, translation);
        }
    }
    ///
    /// Rotates the entire mesh by applying the given `rotation` to each vertex position.
    ///
    /// # Examples
    ///
    /// ```
    /// # use tri_mesh::prelude::*;
    /// #
    /// # fn main() -> Result<(), Box<tri_mesh::mesh_builder::Error>> {
    /// let mut mesh = MeshBuilder::new().cube().build()?;
    /// # let first_vertex_id = mesh.vertex_iter().next().unwrap();
    /// # let vertex_position_before = mesh.vertex_position(first_vertex_id);
    /// mesh.rotate(Mat3::from_angle_y(Deg(360.0)));
    /// # let vertex_position_after = mesh.vertex_position(first_vertex_id);
    /// # assert!((vertex_position_before - vertex_position_after).magnitude() < 0.000001);
    /// # mesh.is_valid().unwrap();
    /// # Ok(())
    /// # }
    /// ```
    ///
    pub fn rotate(&mut self, rotation: Mat3)
    {
        for vertex_id in self.vertex_iter() {
            let p = self.vertex_position(vertex_id);
            self.move_vertex_to(vertex_id, rotation * p);
        }
    }
    ///
    /// Transforms the entire mesh by applying the `transformation` to each vertex position.
    ///
    /// # Examples
    ///
    /// ```
    /// # use tri_mesh::prelude::*;
    /// #
    /// # fn main() -> Result<(), Box<tri_mesh::mesh_builder::Error>> {
    /// let mut mesh = MeshBuilder::new().cube().build()?;
    /// # let first_vertex_id = mesh.vertex_iter().next().unwrap();
    /// # let vertex_position_before = mesh.vertex_position(first_vertex_id);
    /// mesh.apply_transformation(Mat4::from_translation(vec3(2.5, -1.0, 0.0)));
    /// # let vertex_position_after = mesh.vertex_position(first_vertex_id);
    /// # assert_eq!(vertex_position_before + vec3(2.5, -1.0, 0.0), vertex_position_after);
    /// # mesh.is_valid().unwrap();
    /// # Ok(())
    /// # }
    /// ```
    ///
    pub fn apply_transformation(&mut self, transformation: Mat4)
    {
        for vertex_id in self.vertex_iter() {
            let p = self.vertex_position(vertex_id);
            // Apply the 4x4 transform in homogeneous coordinates:
            // extend to (x, y, z, 1), multiply, then drop the w component.
            let p_new = (transformation * p.extend(1.0)).truncate();
            self.move_vertex_to(vertex_id, p_new);
        }
    }
}
| 37.397516 | 162 | 0.57233 |
0ef2096d83d3edd8bcdc07487124c885af8ead8a
| 1,657 |
use chrono::{DateTime, Local};
use std::time::Duration;
/// Retry policy with exponential back-off.
///
/// Retry policy with exponential back-off (with an added random delay up to 256 ms). Each retry
/// will happen at least after an exponential wait time. So if x is the first retry wait, the
/// second will be x*2, the third x*4 and so on. The policy will retry until the maximum number of
/// retries have been reached or the maximum allowed delay has passed (whichever comes first). The
/// wait time is not precise.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ExponentialRetryPolicy {
delay: Duration,
max_retries: u32,
max_delay: Duration,
}
impl ExponentialRetryPolicy {
pub(crate) fn new(delay: Duration, max_retries: u32, max_delay: Duration) -> Self {
ExponentialRetryPolicy {
delay,
max_retries,
max_delay,
}
}
}
impl super::RetryPolicy for ExponentialRetryPolicy {
    /// Returns `true` once the retry count exceeds `max_retries`, or once more
    /// than `max_delay` wall-clock time has passed since the first retry.
    ///
    /// On the first call, `first_retry_time` is lazily initialised to "now" so
    /// that the deadline is measured from the first retry attempt.
    fn is_expired(&self, first_retry_time: &mut Option<DateTime<Local>>, retry_count: u32) -> bool {
        if retry_count > self.max_retries {
            return true;
        }
        let first_retry_time = first_retry_time.get_or_insert_with(Local::now);
        // `from_std` fails when `max_delay` exceeds chrono's representable
        // range; in that case treat the deadline as effectively unbounded.
        let max_delay = chrono::Duration::from_std(self.max_delay)
            .unwrap_or_else(|_| chrono::Duration::max_value());
        Local::now() > *first_retry_time + max_delay
    }

    /// Wait time before retry number `retry_count` (1-based):
    /// `delay * 2^(retry_count - 1)` plus a random jitter of up to 255 ms.
    fn sleep_duration(&self, retry_count: u32) -> Duration {
        // Saturating arithmetic guards two panics the previous version had:
        // `retry_count - 1` underflowed when called with 0, and
        // `u64::pow(2, ...)` overflowed (a panic in debug builds) for
        // retry counts above 63. For in-range counts the result is identical.
        let factor = 2u64.saturating_pow(retry_count.saturating_sub(1));
        let sleep_ms = (self.delay.as_millis() as u64)
            .saturating_mul(factor)
            .saturating_add(u64::from(rand::random::<u8>()));
        Duration::from_millis(sleep_ms)
    }
}
| 35.255319 | 100 | 0.66204 |
ed9dd937ed18f2b390c68cf904ba3e7ec077c217
| 4,093 |
use std::io::{stdout, Write};
use crossterm::{
cursor::MoveTo,
queue,
terminal::{Clear, ClearType},
};
use crate::area::Area;
use crate::displayable_line::DisplayableLine;
use crate::errors::Result;
use crate::text::FmtText;
/// A scrollable text, in a specific area.
///
/// The text is assumed to have been computed for the given area.
///
/// For example:
///
/// ```
/// use termimad::*;
///
/// // You typically borrow those 3 vars from elsewhere
/// let markdown = "#title\n* item 1\n* item 2";
/// let area = Area::new(0, 0, 10, 12);
/// let skin = MadSkin::default();
///
/// // displaying
/// let text = skin.area_text(markdown, &area);
/// let view = TextView::from(&area, &text);
/// view.write().unwrap();
/// ```
///
/// If the text and skin are constant, you might prefer to
/// use a MadView instead of a TextView: the MadView owns
/// the mardkown string and ensures the formatted text
/// is computed accordingly to the area.
pub struct TextView<'a, 't> {
area: &'a Area,
text: &'t FmtText<'t, 't>,
pub scroll: i32, // 0 for no scroll, positive if scrolled
pub show_scrollbar: bool,
}
impl<'a, 't> TextView<'a, 't> {
    /// make a displayed text, that is a text in an area
    pub fn from(area: &'a Area, text: &'t FmtText<'_, '_>) -> TextView<'a, 't> {
        TextView {
            area,
            text,
            scroll: 0,
            show_scrollbar: true,
        }
    }
    /// Number of formatted lines, i.e. the height the whole text would need.
    #[inline(always)]
    pub fn content_height(&self) -> i32 {
        self.text.lines.len() as i32
    }
    /// return an option which when filled contains
    /// a tuple with the top and bottom of the vertical
    /// scrollbar. Returns `None` when the content fits
    /// the available space (or if show_scrollbar is false).
    #[inline(always)]
    pub fn scrollbar(&self) -> Option<(u16, u16)> {
        if self.show_scrollbar {
            self.area.scrollbar(self.scroll, self.content_height())
        } else {
            None
        }
    }
    /// display the text in the area, taking the scroll into account.
    pub fn write(&self) -> Result<()> {
        let mut stdout = stdout();
        self.write_on(&mut stdout)?;
        stdout.flush()?;
        Ok(())
    }
    /// display the text in the area, taking the scroll into account.
    ///
    /// Commands are queued on `w`; the caller is responsible for flushing.
    pub fn write_on<W>(&self, w: &mut W) -> Result<()>
    where
        W: std::io::Write,
    {
        let scrollbar = self.scrollbar();
        // Column just right of the text, where the scrollbar is drawn.
        let sx = self.area.left + self.area.width;
        // Index of the first visible line, offset by the scroll.
        let mut i = self.scroll as usize;
        for y in 0..self.area.height {
            queue!(w, MoveTo(self.area.left, self.area.top + y))?;
            if i < self.text.lines.len() {
                let dl = DisplayableLine::new(self.text.skin, &self.text.lines[i], self.text.width);
                write!(w, "{}", &dl)?;
                i += 1;
            }
            // Pad the rest of the row with the paragraph background color.
            self.text.skin.paragraph.compound_style.queue_bg(w)?;
            queue!(w, Clear(ClearType::UntilNewLine))?;
            if let Some((sctop, scbottom)) = scrollbar {
                queue!(w, MoveTo(sx, self.area.top + y))?;
                // Thumb inside [sctop, scbottom], track elsewhere.
                if sctop <= y && y <= scbottom {
                    write!(w, "{}", self.text.skin.scrollbar.thumb)?;
                } else {
                    write!(w, "{}", self.text.skin.scrollbar.track)?;
                }
            }
        }
        Ok(())
    }
    /// set the scroll position but makes it fit into allowed positions.
    /// Return the actual scroll.
    pub fn set_scroll(&mut self, scroll: i32) -> i32 {
        // Clamp into [0, content_height - area height + 1].
        // NOTE(review): the +1 allows scrolling one line past an exactly
        // fitting text — looks intentional but worth confirming upstream.
        self.scroll = scroll
            .min(self.content_height() - i32::from(self.area.height) + 1)
            .max(0);
        self.scroll
    }
    /// change the scroll position
    /// lines_count can be negative
    pub fn try_scroll_lines(&mut self, lines_count: i32) -> i32{
        self.set_scroll(self.scroll + lines_count)
    }
    /// change the scroll position
    /// pages_count can be negative (a page is one area height worth of lines)
    pub fn try_scroll_pages(&mut self, pages_count: i32) -> i32{
        self.try_scroll_lines(pages_count * i32::from(self.area.height))
    }
}
| 30.774436 | 100 | 0.562179 |
09a7c84baa07e1b73717cfe432ba7991696b931c
| 736 |
use responses::raw::chat::Chat;
use error::UnexpectedResponse;
use try_from::TryFrom;
#[derive(Clone, Debug)]
pub struct Channel {
    // numeric chat id of the channel
    pub id: i64,
    // channel title; always present (conversion from `Chat` requires it)
    pub title: String,
    // optional public username of the channel
    pub username: Option<String>,
}
impl TryFrom<Chat> for Channel {
    type Error = UnexpectedResponse;

    /// Converts a raw `Chat` into a `Channel`.
    ///
    /// Succeeds only when the raw chat's `type` is `"channel"` and a
    /// title is present; any other shape yields
    /// `UnexpectedResponse::ConvertError`.
    fn try_from(chat: Chat) -> Result<Self, UnexpectedResponse> {
        match (chat.id, chat.title, chat.username, chat.typ.as_ref()) {
            (id, Some(title), username, "channel") =>
                Ok(Channel {
                    id,
                    title,
                    username,
                }),
            _ =>
                // fix: message previously read "Excepted channel"
                Err(UnexpectedResponse::ConvertError(String::from("Wrong chat. Expected channel")))
        }
    }
}
| 27.259259 | 99 | 0.550272 |
dedbd1ee08e907be883bd6de9ef228f53eb2644e
| 181 |
fn main() {
    let s = String::from("hello");
    // fully-qualified call: method syntax below is sugar for this form
    let via_type = String::len(&s);
    let via_method = s.len();
    println!("len1 = {} = len2 = {}", via_type, via_method)
}
| 30.166667 | 50 | 0.530387 |
56e4472abb4a54cade94e5862b68ff4e85d34378
| 4,296 |
// This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files
// DO NOT EDIT
use crate::Setting;
#[cfg(any(feature = "v1_14", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_14")))]
use glib::object::Cast;
#[cfg(any(feature = "v1_14", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_14")))]
use glib::object::ObjectType as ObjectType_;
#[cfg(any(feature = "v1_14", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_14")))]
use glib::signal::connect_raw;
#[cfg(any(feature = "v1_14", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_14")))]
use glib::signal::SignalHandlerId;
#[cfg(any(feature = "v1_14", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_14")))]
use glib::translate::*;
#[cfg(any(feature = "v1_14", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_14")))]
use glib::ToValue;
#[cfg(any(feature = "v1_14", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_14")))]
use std::boxed::Box as Box_;
use std::fmt;
#[cfg(any(feature = "v1_14", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_14")))]
use std::mem::transmute;
// Generated glib object wrapper: ties the Rust type to the C GType.
glib::wrapper! {
    /// GObject wrapper for `NMSetting6Lowpan`, extending the base `Setting`.
    #[doc(alias = "NMSetting6Lowpan")]
    pub struct Setting6Lowpan(Object<ffi::NMSetting6Lowpan, ffi::NMSetting6LowpanClass>) @extends Setting;

    match fn {
        type_ => || ffi::nm_setting_6lowpan_get_type(),
    }
}
impl Setting6Lowpan {
    /// Creates a new [`Setting6Lowpan`][crate::Setting6Lowpan] object with default values.
    ///
    /// # Returns
    ///
    /// the new empty [`Setting6Lowpan`][crate::Setting6Lowpan] object
    #[cfg(any(feature = "v1_14", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_14")))]
    #[doc(alias = "nm_setting_6lowpan_new")]
    pub fn new() -> Setting6Lowpan {
        // SAFETY: `from_glib_full` takes ownership of the object returned by
        // the C constructor; the unchecked downcast is the generated pattern
        // for a function documented to return an NMSetting6Lowpan.
        unsafe { Setting::from_glib_full(ffi::nm_setting_6lowpan_new()).unsafe_cast() }
    }

    ///
    /// # Returns
    ///
    /// the `property::Setting6Lowpan::parent` property of the setting
    #[cfg(any(feature = "v1_14", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_14")))]
    #[doc(alias = "nm_setting_6lowpan_get_parent")]
    #[doc(alias = "get_parent")]
    pub fn parent(&self) -> Option<glib::GString> {
        // `from_glib_none`: the C side keeps ownership of the returned
        // string, so a copy is made on the Rust side.
        unsafe { from_glib_none(ffi::nm_setting_6lowpan_get_parent(self.to_glib_none().0)) }
    }

    /// If given, specifies the parent interface name or parent connection UUID
    /// from which this 6LowPAN interface should be created.
    #[cfg(any(feature = "v1_14", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_14")))]
    pub fn set_parent(&self, parent: Option<&str>) {
        // Sets the "parent" GObject property through the raw GObject API.
        unsafe {
            glib::gobject_ffi::g_object_set_property(
                self.as_ptr() as *mut glib::gobject_ffi::GObject,
                b"parent\0".as_ptr() as *const _,
                parent.to_value().to_glib_none().0,
            );
        }
    }

    /// Connects a handler invoked on the GObject `notify::parent` signal,
    /// i.e. whenever the `parent` property changes.
    #[cfg(any(feature = "v1_14", feature = "dox"))]
    #[cfg_attr(feature = "dox", doc(cfg(feature = "v1_14")))]
    #[doc(alias = "parent")]
    pub fn connect_parent_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
        // C-ABI trampoline: receives the boxed Rust closure as user data
        // and forwards the emitting object to it.
        unsafe extern "C" fn notify_parent_trampoline<F: Fn(&Setting6Lowpan) + 'static>(
            this: *mut ffi::NMSetting6Lowpan,
            _param_spec: glib::ffi::gpointer,
            f: glib::ffi::gpointer,
        ) {
            let f: &F = &*(f as *const F);
            f(&from_glib_borrow(this))
        }
        unsafe {
            // Box the closure and hand the raw pointer to glib; it is passed
            // back to the trampoline on every signal emission.
            let f: Box_<F> = Box_::new(f);
            connect_raw(
                self.as_ptr() as *mut _,
                b"notify::parent\0".as_ptr() as *const _,
                Some(transmute::<_, unsafe extern "C" fn()>(
                    notify_parent_trampoline::<F> as *const (),
                )),
                Box_::into_raw(f),
            )
        }
    }
}
#[cfg(any(feature = "v1_14", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v1_14")))]
impl Default for Setting6Lowpan {
    /// Equivalent to [`Setting6Lowpan::new`]: a setting with default values.
    fn default() -> Self {
        Self::new()
    }
}
impl fmt::Display for Setting6Lowpan {
    // Displays only the type name; property values are not included.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str("Setting6Lowpan")
    }
}
| 36.10084 | 106 | 0.587756 |
3993090c2b5079742fbc42c3452b63bbdc617d10
| 733 |
// One public module per numbered problem/exercise, p1 through p57.
pub mod p1;
pub mod p2;
pub mod p3;
pub mod p4;
pub mod p5;
pub mod p6;
pub mod p7;
pub mod p8;
pub mod p9;
pub mod p10;
pub mod p11;
pub mod p12;
pub mod p13;
pub mod p14;
pub mod p15;
pub mod p16;
pub mod p17;
pub mod p18;
pub mod p19;
pub mod p20;
pub mod p21;
pub mod p22;
pub mod p23;
pub mod p24;
pub mod p25;
pub mod p26;
pub mod p27;
pub mod p28;
pub mod p29;
pub mod p30;
pub mod p31;
pub mod p32;
pub mod p33;
pub mod p34;
pub mod p35;
pub mod p36;
pub mod p37;
pub mod p38;
pub mod p39;
pub mod p40;
pub mod p41;
pub mod p42;
pub mod p43;
pub mod p44;
pub mod p45;
pub mod p46;
pub mod p47;
pub mod p48;
pub mod p49;
pub mod p50;
pub mod p51;
pub mod p52;
pub mod p53;
pub mod p54;
pub mod p55;
pub mod p56;
pub mod p57;
| 12.423729 | 12 | 0.687585 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.