Columns: file_name (stringlengths 3–137), prefix (stringlengths 0–918k), suffix (stringlengths 0–962k), middle (stringlengths 0–812k).
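Each row below is a fill-in-the-middle (FIM) split of one source file: concatenating its prefix, middle, and suffix strings reconstructs the original file. A minimal reassembly sketch in Python, assuming each row is available as a dict with these four string fields (how the rows are loaded is left out here, and `rows` is a hypothetical name):

def reassemble(row):
    # Rebuild the original source file from one FIM-split row.
    return row["prefix"] + row["middle"] + row["suffix"]

# Hypothetical usage: `rows` stands in for however the dataset is loaded.
for row in rows:
    print(row["file_name"], len(reassemble(row)))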
reject_owner.py
# Copyright 2019 Contributors to Hyperledger Sawtooth # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ----------------------------------------------------------------------------- """Implements the REJECT_ADD_ROLE_OWNER message usage: rbac.role.owner.reject.create()""" import logging from rbac.common import addresser from rbac.common.proposal.proposal_reject import ProposalReject LOGGER = logging.getLogger(__name__) class RejectAddRoleOwner(ProposalReject): """Implements the REJECT_ADD_ROLE_OWNER message usage: rbac.role.owner.reject.create()""" def __init__(self): super().__init__() self._register() @property def message_action_type(self): """The action type performed by this message""" return addresser.MessageActionType.REJECT @property def message_subaction_type(self): """The subsequent action performed or proposed by this message""" return addresser.MessageActionType.ADD @property def message_object_type(self): """The object type this message acts upon""" return addresser.ObjectType.ROLE @property def message_related_type(self): """the object type of the related object this message acts upon""" return addresser.ObjectType.USER @property def message_relationship_type(self): """The relationship type this message acts upon""" return addresser.RelationshipType.OWNER def make_addresses(self, message, signer_user_id):
"""Makes the appropriate inputs & output addresses for the message""" inputs, outputs = super().make_addresses(message, signer_user_id) # should be owner not admin signer_admin_address = addresser.role.admin.address( message.object_id, signer_user_id ) inputs.add(signer_admin_address) signer_owner_address = addresser.role.owner.address( message.object_id, signer_user_id ) inputs.add(signer_owner_address) proposal_address = self.address( object_id=message.object_id, related_id=message.related_id ) inputs.add(proposal_address) outputs.add(proposal_address) return inputs, outputs
def validate_state(self, context, message, payload, input_state, store): """Validates that: 1. the signer is an owner of the role""" super().validate_state( context=context, message=message, payload=payload, input_state=input_state, store=store, ) # TODO: change to verify proposal assignment and hierarchy # TODO: should be owners # if not addresser.role.admin.exists_in_state_inputs( # inputs=payload.inputs, # input_state=input_state, # object_id=message.object_id, # related_id=payload.signer.user_id, # ): # raise ValueError( # "Signer {} must be an admin of the role {}".format( # payload.signer.user_id, message.object_id # ) # )
primitiveDropdownMenu.jsdoc.js
/** * @typedef {Object} Item
* @name items * @property {string} name Unique name for the item.
* @property {string} label Label of the item. * @property {string} iconName The Lightning Design System name of the icon displayed before the item label. Names are written in the format 'standard:account' where 'standard' is the category, and 'account' is the specific icon to be displayed. */
misc.ts
import { EventType } from 'engine/enums'; import _events from 'engine/events'; import _map from 'engine/map'; import { setVarc } from 'engine/var'; import { sendMessage, sendCommandResponse } from 'shared/chat'; import { lookupPlayerName, toFormattedTime } from 'shared/util'; import { openCentralWidget, openWidget } from 'shared/widget'; import _coords from 'shared/map/coords'; import { runAnim } from 'shared/anim'; import { teleport } from 'shared/map'; import { lendItem } from '../../trade/loan'; var Virtue = Java.type('org.virtue.Virtue'); var World = Java.type('org.virtue.game.World'); _events.bindEventListener(EventType.COMMAND_ADMIN, "root", (ctx) => { var parent = parseInt(ctx.cmdArgs[0]); ctx.player.getDispatcher().sendRootWidget(parent); }); _events.bindEventListener(EventType.COMMAND_ADMIN, ["coords","pos","mypos"], (ctx) => { sendCommandResponse(ctx.player, lookupPlayerName(ctx.player) +" "+ _map.getCoords(ctx.player) +" or "+ _map.getCoordX(ctx.player)+" "+ _map.getCoordY(ctx.player), ctx.console); });
_events.bindEventListener(EventType.COMMAND_ADMIN, "testloan", (ctx) => { var args = ctx.cmdArgs;
var objId = 4151, duration=0; if (args.length > 0) { objId = parseInt(args[0]); } if (args.length > 1) { duration = parseInt(args[1]); } lendItem(ctx.player, ctx.player, objId, duration); }); _events.bindEventListener(EventType.COMMAND_ADMIN, [ "inter", "if", "widget" ], (ctx) => { var player = ctx.player; var args = ctx.cmdArgs; if (args.length < 1 || isNaN(parseInt(args[0]))) { sendCommandResponse(player, "Usage: "+ctx.syntax+" [id]", ctx.console); return; } if (args.length >= 3) { var parent = parseInt(args[0]); var slot = parseInt(args[1]); var sub = parseInt(args[2]); openWidget(player, parent, slot, sub, false); } else { openCentralWidget(player, parseInt(args[0]), false); } }); _events.bindEventListener(EventType.COMMAND_ADMIN, "uptime", (ctx) => { var ticks = ENGINE.getServerCycle(); var time = toFormattedTime(ticks); sendCommandResponse(ctx.player, "Server has been online for "+time+".", ctx.console); }); _events.bindEventListener(EventType.COMMAND_ADMIN, "setKey", (ctx) => { var player = ctx.player; var args = ctx.cmdArgs; var amount = parseInt(args[0]); player.setKeys(amount); setVarc(player, 1800, player.getKeys() - 1); sendMessage(player, "You now have "+(player.getKeys())+" keys for Treasure Hunter."); }); _events.bindEventListener(EventType.COMMAND_ADMIN, ["priceReload", "reloadPrice"], (ctx) => { Virtue.getInstance().getExchange().loadPrices(); }); _events.bindEventListener(EventType.COMMAND_ADMIN, "adr", (ctx) => { ctx.player.getCombatSchedule().updateAdrenaline(100); }); _events.bindEventListener(EventType.COMMAND_ADMIN, "adminroom", (ctx) => { teleport(ctx.player, _coords(2845, 5154, 0), 18007); }); _events.bindEventListener(EventType.COMMAND_ADMIN, "forcetalk", (ctx) => { var args = ctx.cmdArgs; var message = ""; for (var i = 0; i < args.length; i++) { message += (i === 0 ? (args[i].substring(0, 1).toUpperCase() + args[i].substring(1)) : args[i]) + (i == args.length - 1 ? "" : " "); } var iterate = World.getInstance().getPlayers().iterator(); var player = null; while (iterate.hasNext()) { player = iterate.next(); ENGINE.playerForceSay(player, message, false); } }); _events.bindEventListener(EventType.COMMAND_ADMIN, "forcedance", (ctx) => { var iterate = ENGINE.getPlayerIterator(ENGINE.getWorld()); var p2 = null; while (iterate.hasNext()) { p2 = iterate.next(); p2.getAppearance().setRenderAnimation(3171); p2.getAppearance().refresh(); runAnim(p2, 7071);//7071 } });
premium_messaging_regions_operations.py
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- import uuid from msrest.pipeline import ClientRawResponse from .. import models class PremiumMessagingRegionsOperations(object): """PremiumMessagingRegionsOperations operations. :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. :ivar api_version: Client API version. Constant value: "2017-04-01". """ models = models def __init__(self, client, config, serializer, deserializer):
self._client = client self._serialize = serializer self._deserialize = deserializer self.api_version = "2017-04-01" self.config = config
def list( self, custom_headers=None, raw=False, **operation_config): """Gets the available premium messaging regions for Service Bus. :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. :return: An iterator like instance of PremiumMessagingRegions :rtype: ~azure.mgmt.servicebus.models.PremiumMessagingRegionsPaged[~azure.mgmt.servicebus.models.PremiumMessagingRegions] :raises: :class:`ErrorResponseException<azure.mgmt.servicebus.models.ErrorResponseException>` """ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL url = self.list.metadata['url'] path_format_arguments = { 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') else: url = next_link query_parameters = {} # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.get(url, query_parameters) response = self._client.send( request, header_parameters, stream=False, **operation_config) if response.status_code not in [200]: raise models.ErrorResponseException(self._deserialize, response) return response # Deserialize response deserialized = models.PremiumMessagingRegionsPaged(internal_paging, self._deserialize.dependencies) if raw: header_dict = {} client_raw_response = models.PremiumMessagingRegionsPaged(internal_paging, self._deserialize.dependencies, header_dict) return client_raw_response return deserialized list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.ServiceBus/premiumMessagingRegions'}
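For context, a usage sketch for this generated operations class, assuming an authenticated ServiceBusManagementClient; the credential values and subscription id below are placeholders, not part of this file:

from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.servicebus import ServiceBusManagementClient

# Placeholder credentials; supply real values in practice.
credentials = ServicePrincipalCredentials(client_id="...", secret="...", tenant="...")
client = ServiceBusManagementClient(credentials, "<subscription-id>")

# `list` returns a PremiumMessagingRegionsPaged; iterating it follows
# nextLink pages transparently via internal_paging above.
for region in client.premium_messaging_regions.list():
    print(region.name)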
exercise-12.ts
import { BehaviorSubject, fromEvent, interval, NEVER } from 'rxjs' import { map, switchMap, tap } from 'rxjs/operators' const state = new BehaviorSubject({ counter: 0, isStarted: false })
export const counter = () => state.asObservable().pipe(
switchMap(x => (x.isStarted ? interval(1_000).pipe(map(() => x.counter)) : NEVER)), tap(() => state.next({ ...state.value, counter: state.value.counter + 1 })) ) export const play = () => fromEvent(document.querySelector('#exercise-12-play')!, 'click').pipe( tap(() => state.next({ ...state.value, isStarted: true })) ) export const pause = () => fromEvent(document.querySelector('#exercise-12-pause')!, 'click').pipe( tap(() => state.next({ ...state.value, isStarted: false })) ) export const reset = () => fromEvent(document.querySelector('#exercise-12-reset')!, 'click').pipe( tap(() => state.next({ counter: 0, isStarted: false })) )
msgpack.rs
//! Automatic MessagePack (de)serialization support. //! //! See [`MsgPack`](crate::serde::msgpack::MsgPack) for further details. //! //! # Enabling //! //! This module is only available when the `msgpack` feature is enabled. Enable it //! in `Cargo.toml` as follows: //! //! ```toml //! [dependencies.rocket] //! version = "0.5.0-rc.2" //! features = ["msgpack"] //! ``` //! //! # Testing //! //! The [`LocalRequest`] and [`LocalResponse`] types provide [`msgpack()`] and //! [`into_msgpack()`] methods to create a request with serialized MessagePack //! and deserialize a response as MessagePack, respectively. //! //! [`LocalRequest`]: crate::local::blocking::LocalRequest //! [`LocalResponse`]: crate::local::blocking::LocalResponse //! [`msgpack()`]: crate::local::blocking::LocalRequest::msgpack() //! [`into_msgpack()`]: crate::local::blocking::LocalResponse::into_msgpack() use std::io; use std::ops::{Deref, DerefMut}; use crate::request::{Request, local_cache}; use crate::data::{Limits, Data, FromData, Outcome}; use crate::response::{self, Responder, content}; use crate::http::Status; use crate::form::prelude as form; // use crate::http::uri::fmt; use serde::{Serialize, Deserialize}; #[doc(inline)] pub use rmp_serde::decode::Error; /// The MessagePack guard: easily consume and return MessagePack. /// /// ## Sending MessagePack /// /// To respond with serialized MessagePack data, return a `MsgPack<T>` type, /// where `T` implements [`Serialize`] from [`serde`]. The content type of the /// response is set to `application/msgpack` automatically. /// /// ```rust /// # #[macro_use] extern crate rocket; /// # type User = usize; /// use rocket::serde::msgpack::MsgPack; /// /// #[get("/users/<id>")] /// fn user(id: usize) -> MsgPack<User> { /// let user_from_id = User::from(id); /// /* ... */ /// MsgPack(user_from_id) /// } /// ``` /// /// ## Receiving MessagePack /// /// `MsgPack` is both a data guard and a form guard. /// /// ### Data Guard /// /// To deserialize request body data as MessagePack, add a `data` route /// argument with a target type of `MsgPack<T>`, where `T` is some type you'd /// like to parse from MessagePack. `T` must implement [`serde::Deserialize`]. /// /// ```rust /// # #[macro_use] extern crate rocket; /// # type User = usize; /// use rocket::serde::msgpack::MsgPack; /// /// #[post("/users", format = "msgpack", data = "<user>")] /// fn new_user(user: MsgPack<User>) { /// /* ... */ /// } /// ``` /// /// You don't _need_ to use `format = "msgpack"`, but it _may_ be what you want. /// Using `format = "msgpack"` means that any request that doesn't specify /// "application/msgpack" as its first `Content-Type:` header parameter will not /// be routed to this handler. /// /// ### Form Guard /// /// `MsgPack<T>`, as a form guard, accepts value and data fields and parses the /// data as a `T`. Simply use `MsgPack<T>`: /// /// ```rust /// # #[macro_use] extern crate rocket; /// # type Metadata = usize; /// use rocket::form::{Form, FromForm}; /// use rocket::serde::msgpack::MsgPack; /// /// #[derive(FromForm)] /// struct User<'r> { /// name: &'r str, /// metadata: MsgPack<Metadata> /// } /// /// #[post("/users", data = "<form>")] /// fn new_user(form: Form<User<'_>>) { /// /* ... */ /// } /// ``` /// /// ### Incoming Data Limits /// /// The default size limit for incoming MessagePack data is 1MiB. Setting a /// limit protects your application from denial of service (DoS) attacks and /// from resource exhaustion through high memory consumption.
The limit can be /// increased by setting the `limits.msgpack` configuration parameter. For /// instance, to increase the MessagePack limit to 5MiB for all environments, /// you may add the following to your `Rocket.toml`: /// /// ```toml /// [global.limits] /// msgpack = 5242880 /// ``` #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct MsgPack<T>(pub T); impl<T> MsgPack<T> { /// Consumes the `MsgPack` wrapper and returns the wrapped item. /// /// # Example /// /// ```rust /// # use rocket::serde::msgpack::MsgPack; /// let string = "Hello".to_string(); /// let my_msgpack = MsgPack(string); /// assert_eq!(my_msgpack.into_inner(), "Hello".to_string()); /// ``` #[inline(always)] pub fn into_inner(self) -> T { self.0 } } impl<'r, T: Deserialize<'r>> MsgPack<T> { fn from_bytes(buf: &'r [u8]) -> Result<Self, Error> { rmp_serde::from_slice(buf).map(MsgPack) } async fn from_data(req: &'r Request<'_>, data: Data<'r>) -> Result<Self, Error> { let limit = req.limits().get("msgpack").unwrap_or(Limits::MESSAGE_PACK); let bytes = match data.open(limit).into_bytes().await { Ok(buf) if buf.is_complete() => buf.into_inner(), Ok(_) => { let eof = io::ErrorKind::UnexpectedEof; return Err(Error::InvalidDataRead(io::Error::new(eof, "data limit exceeded"))); }, Err(e) => return Err(Error::InvalidDataRead(e)), }; Self::from_bytes(local_cache!(req, bytes)) } } #[crate::async_trait] impl<'r, T: Deserialize<'r>> FromData<'r> for MsgPack<T> { type Error = Error; async fn from_data(req: &'r Request<'_>, data: Data<'r>) -> Outcome<'r, Self> { match Self::from_data(req, data).await { Ok(value) => Outcome::Success(value), Err(Error::InvalidDataRead(e)) if e.kind() == io::ErrorKind::UnexpectedEof => { Outcome::Failure((Status::PayloadTooLarge, Error::InvalidDataRead(e))) }, | Err(e@Error::TypeMismatch(_)) | Err(e@Error::OutOfRange) | Err(e@Error::LengthMismatch(_)) => { Outcome::Failure((Status::UnprocessableEntity, e)) }, Err(e) => Outcome::Failure((Status::BadRequest, e)), } } } /// Serializes the wrapped value into MessagePack. Returns a response with /// Content-Type `MsgPack` and a fixed-size body with the serialization. If /// serialization fails, an `Err` of `Status::InternalServerError` is returned. impl<'r, T: Serialize> Responder<'r, 'static> for MsgPack<T> { fn respond_to(self, req: &'r Request<'_>) -> response::Result<'static> { let buf = rmp_serde::to_vec(&self.0) .map_err(|e| { error_!("MsgPack failed to serialize: {:?}", e); Status::InternalServerError })?; content::RawMsgPack(buf).respond_to(req) } } #[crate::async_trait] impl<'v, T: Deserialize<'v> + Send> form::FromFormField<'v> for MsgPack<T> { // TODO: To implement `from_value`, we need the raw string so we can // decode it into bytes as opposed to a string as it won't be UTF-8. async fn from_data(f: form::DataField<'v, '_>) -> Result<Self, form::Errors<'v>> { Self::from_data(f.request, f.data).await.map_err(|e| { match e { Error::InvalidMarkerRead(e) | Error::InvalidDataRead(e) => e.into(), Error::Utf8Error(e) => e.into(), _ => form::Error::custom(e).into(), } }) } } // impl<T: Serialize> fmt::UriDisplay<fmt::Query> for MsgPack<T> { // fn fmt(&self, f: &mut fmt::Formatter<'_, fmt::Query>) -> std::fmt::Result { // let bytes = to_vec(&self.0).map_err(|_| std::fmt::Error)?; // let encoded = crate::http::RawStr::percent_encode_bytes(&bytes); // f.write_value(encoded.as_str()) // } // } impl<T> From<T> for MsgPack<T> { fn
from(value: T) -> Self { MsgPack(value) } } impl<T> Deref for MsgPack<T> { type Target = T; #[inline(always)] fn deref(&self) -> &T { &self.0 } } impl<T> DerefMut for MsgPack<T> { #[inline(always)] fn deref_mut(&mut self) -> &mut T { &mut self.0 } } /// Deserialize an instance of type `T` from MessagePack encoded bytes. /// /// Deserialization is performed in a zero-copy manner whenever possible. /// /// **_Always_ use [`MsgPack`] to deserialize MessagePack request data.** /// /// # Example /// /// ``` /// use rocket::serde::{Deserialize, msgpack}; /// /// #[derive(Debug, PartialEq, Deserialize)] /// #[serde(crate = "rocket::serde")] /// struct Data<'r> { /// framework: &'r str, /// stars: usize, /// } /// /// let bytes = &[ /// 130, 169, 102, 114, 97, 109, 101, 119, 111, 114, 107, 166, 82, 111, /// 99, 107, 101, 116, 165, 115, 116, 97, 114, 115, 5 /// ]; /// /// let data: Data = msgpack::from_slice(bytes).unwrap(); /// assert_eq!(data, Data { framework: "Rocket", stars: 5, }); /// ``` /// /// # Errors /// /// Deserialization fails if `v` does not represent a valid MessagePack encoding /// of any instance of `T` or if `T`'s `Deserialize` implementation fails /// otherwise. #[inline(always)] pub fn from_slice<'a, T>(v: &'a [u8]) -> Result<T, Error> where T: Deserialize<'a>, { rmp_serde::from_slice(v) } /// Serialize a `T` into a MessagePack byte vector with compact representation. /// /// The compact representation represents structs as arrays. /// /// **_Always_ use [`MsgPack`] to serialize MessagePack response data.** /// /// # Example /// /// ``` /// use rocket::serde::{Deserialize, Serialize, msgpack}; /// /// #[derive(Deserialize, Serialize)] /// #[serde(crate = "rocket::serde")] /// struct Data<'r> { /// framework: &'r str, /// stars: usize, /// } /// /// let bytes = &[146, 166, 82, 111, 99, 107, 101, 116, 5]; /// let data: Data = msgpack::from_slice(bytes).unwrap(); /// let byte_vec = msgpack::to_compact_vec(&data).unwrap(); /// assert_eq!(bytes, &byte_vec[..]); /// ``` /// /// # Errors /// /// Serialization fails if `T`'s `Serialize` implementation fails. #[inline(always)] pub fn to_compact_vec<T>(value: &T) -> Result<Vec<u8>, rmp_serde::encode::Error> where T: Serialize + ?Sized { rmp_serde::to_vec(value) } /// Serialize a `T` into a MessagePack byte vector with named representation. /// /// The named representation represents structs as maps with field names. /// /// **_Always_ use [`MsgPack`] to serialize MessagePack response data.** /// /// # Example /// /// ``` /// use rocket::serde::{Deserialize, Serialize, msgpack}; /// /// #[derive(Deserialize, Serialize)] /// #[serde(crate = "rocket::serde")] /// struct Data<'r> { /// framework: &'r str, /// stars: usize, /// } /// /// let bytes = &[ /// 130, 169, 102, 114, 97, 109, 101, 119, 111, 114, 107, 166, 82, 111, /// 99, 107, 101, 116, 165, 115, 116, 97, 114, 115, 5 /// ]; /// /// let data: Data = msgpack::from_slice(bytes).unwrap(); /// let byte_vec = msgpack::to_vec(&data).unwrap(); /// assert_eq!(bytes, &byte_vec[..]); /// ``` /// /// # Errors /// /// Serialization fails if `T`'s `Serialize` implementation fails. #[inline(always)] pub fn to_vec<T>(value: &T) -> Result<Vec<u8>, rmp_serde::encode::Error> where T: Serialize + ?Sized { rmp_serde::to_vec_named(value) }
response_info.rs
use super::{Response, ResponseError, ERROR_CODE, FAILURE_CODE, SUCCESS_CODE}; use xml_rpc::{self, Value}; pub struct ResponseInfo { pub code: i32, pub message: String, pub data: Value, } impl ResponseInfo { #[inline] pub fn new(code: i32, message: String, data: Value) -> Self { Self { code, message, data, } } #[inline] pub fn from_array(parameters: &[Value]) -> Response<Self> { match *parameters { [Value::Int(code), Value::String(ref message), ref data] => Ok(Self::new(code, message.clone(), data.clone())), _ => return Err(ResponseError::Server(format!( "Response with three parameters (int code, str msg, value) expected from server, received: {:?}", parameters ))), } } #[inline] pub fn from_response(response: Response<Value>, message: &str) -> Self { match response { Ok(data) => Self::from_response_success(data, message), Err(err) => Self::from_response_error(err), } } #[inline] pub fn from_response_error(err: ResponseError) -> Self { match err { ResponseError::Client(msg) => Self::from_client_error(msg), ResponseError::Server(msg) => Self::from_server_error(msg), } } #[inline] pub fn from_client_error(message: String) -> Self { Self::new(ERROR_CODE, message, Value::Int(0)) } #[inline] pub fn from_server_error(message: String) -> Self { Self::new(FAILURE_CODE, message, Value::Int(0)) } #[inline] pub fn from_response_success(data: Value, message: &str) -> Self
{ Self::new(SUCCESS_CODE, message.to_owned(), data) }
} impl Into<xml_rpc::Response> for ResponseInfo { fn into(self) -> xml_rpc::Response { let code = Value::Int(self.code); let message = Value::String(self.message); Ok(vec![Value::Array(vec![code, message, self.data])]) } } impl Into<Response<Value>> for ResponseInfo { fn into(self) -> Response<Value> { match self.code { SUCCESS_CODE => Ok(self.data), ERROR_CODE => Err(ResponseError::Client(self.message)), FAILURE_CODE => Err(ResponseError::Server(self.message)), _ => Err(ResponseError::Server(format!( "Bad response code \"{}\" returned from server", self.code ))), } } }
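The (code, message, data) triple here follows the XML-RPC response convention used by the ROS master API. A sketch of a client-side consumer of such responses in Python, assuming the conventional code values (SUCCESS_CODE = 1, FAILURE_CODE = 0, ERROR_CODE = -1; they are imported from super in this file and not shown) and a hypothetical endpoint:

import xmlrpc.client

# Hypothetical server speaking the (code, message, data) protocol above.
server = xmlrpc.client.ServerProxy("http://localhost:11311")
code, message, data = server.getUri("/my_node")  # example ROS master method

if code == 1:        # SUCCESS_CODE: data carries the payload
    print("ok:", data)
elif code == -1:     # ERROR_CODE: caller-side problem
    print("client error:", message)
else:                # FAILURE_CODE: server-side problem
    print("server failure:", message)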
0007_alter_movie_user_charge.py
# Generated by Django 3.2.8 on 2021-11-05 00:25 from django.db import migrations, models class Migration(migrations.Migration):
dependencies = [ ('moviestore', '0006_movie_user_charge'), ] operations = [ migrations.AlterField( model_name='movie', name='user_charge', field=models.DecimalField(blank=True, decimal_places=2, max_digits=5), ), ]
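For reference, the model definition this migration's end state corresponds to would be roughly the following sketch (the real Movie model in moviestore/models.py presumably has more fields):

from django.db import models

class Movie(models.Model):
    # max_digits=5 / decimal_places=2 allows values up to 999.99;
    # blank=True makes the field optional in forms but not nullable in the DB.
    user_charge = models.DecimalField(blank=True, decimal_places=2, max_digits=5)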
display.rs
mod error; use crate::light::LightSensor; use crate::weather::{currently_raining, high_low_temp, next_rain_start_or_stop, OpenWeather}; pub use error::Error; use chrono::{DateTime, Datelike, Local, Month, Timelike}; use num_traits::cast::FromPrimitive; #[cfg(target_arch = "arm")] use embedded_graphics::{ egrectangle, egtext, fonts::Font12x16, fonts::Font24x32, pixelcolor::Rgb565, prelude::*, primitive_style, text_style, }; #[cfg(target_arch = "arm")] use hd44780_driver::{ bus::FourBitBus, Cursor, CursorBlink, Display as HD44780DisplaySetting, DisplayMode, HD44780, }; #[cfg(target_arch = "arm")] use ht16k33::HT16K33; #[cfg(target_arch = "arm")] use ili9341::{Ili9341, Orientation}; #[cfg(target_arch = "arm")] use linux_embedded_hal::sysfs_gpio::Direction; #[cfg(target_arch = "arm")] use linux_embedded_hal::{Delay, Pin}; #[cfg(target_arch = "arm")] use log::debug; #[cfg(target_arch = "arm")] use rppal::i2c::I2c; #[cfg(target_arch = "arm")] use rppal::pwm::{Channel, Polarity, Pwm}; #[cfg(target_arch = "arm")] use rppal::spi::{Bus, Mode, SlaveSelect, Spi}; const UNIT_CHAR: char = 'F'; // To enable heterogenous abstractions over multiple display types pub enum DisplayType<'a, T: LightSensor> { Console16x2(Console16x2Display<'a, T>), Console20x4(Console20x4Display<'a, T>), #[cfg(target_arch = "arm")] LCD16x2(LCD16x2Display<'a, T>), #[cfg(target_arch = "arm")] LCD20x4(LCD20x4Display<'a, T>), #[cfg(target_arch = "arm")] ILI9341(ILI9341Display<'a, T>), #[cfg(target_arch = "arm")] AlphaNum4(AlphaNum4Display<'a, T>), #[cfg(target_arch = "arm")] SevenSegment4(SevenSegment4Display<'a, T>), Composite(&'a mut [DisplayType<'a, T>]), } impl<'a, T: LightSensor> DisplayType<'a, T> { pub fn print( &mut self, time: &DateTime<Local>, current_state_index: u32, weather: &Option<OpenWeather>, ) -> Result<(), Error> { match &mut *self { Self::Console16x2(display) => display.print(time, current_state_index, weather), Self::Console20x4(display) => display.print(time, current_state_index, weather), #[cfg(target_arch = "arm")] Self::LCD16x2(display) => display.print(time, current_state_index, weather), #[cfg(target_arch = "arm")] Self::LCD20x4(display) => display.print(time, current_state_index, weather), #[cfg(target_arch = "arm")] Self::ILI9341(display) => display.print(time, current_state_index, weather), #[cfg(target_arch = "arm")] Self::AlphaNum4(display) => display.print(time, current_state_index, weather), #[cfg(target_arch = "arm")] Self::SevenSegment4(display) => display.print(time, current_state_index, weather), Self::Composite(displays) => { for d in displays.iter_mut() { d.print(time, current_state_index, weather)?; } Ok(()) } } } } pub trait Display { fn print( &mut self, time: &DateTime<Local>, current_state_index: u32, weather: &Option<OpenWeather>, ) -> Result<(), Error>; } pub struct Console16x2Display<'a, T: LightSensor> { light_sensor: &'a T, } impl<'a, T: LightSensor> Console16x2Display<'a, T> { pub fn new(light_sensor: &'a T) -> Console16x2Display<'a, T> { Console16x2Display { light_sensor: light_sensor, } } } impl<'a, T: LightSensor> Display for Console16x2Display<'a, T> { fn print( &mut self, time: &DateTime<Local>, _: u32, weather: &Option<OpenWeather>, ) -> Result<(), Error> { let (weather_desc, temp_str) = console_weather_and_temp_str(&weather, 3, 7); let first_row = format!("{} {:>10}", console_time_str(&time), weather_desc); let second_row = format!("{} {}", console_date_str(&time), temp_str); println!(); println!("-{}-", std::iter::repeat("-").take(16).collect::<String>()); println!("|{}|", 
first_row); println!("|{}|", second_row); println!("-{}-", std::iter::repeat("-").take(16).collect::<String>()); println!( "Current light: {}", self.light_sensor.read_light_normalized()? ); Ok(()) } } fn console_date_str(time: &DateTime<Local>) -> String { format!( "{} {} {:<2}", &time.weekday().to_string()[0..3], &mmm_from_time(time), time.day() ) } fn console_time_str(time: &DateTime<Local>) -> String { let st = split_time(time); format!("{}{}:{}{}", st[0], st[1], st[2], st[3]) } fn console_weather_and_temp_str( weather: &Option<OpenWeather>, temp_digits: usize, weather_chars: usize, ) -> (String, String)
{ match weather { Some(w) => ( format!( "{:>width$}", truncate_to_characters(&w.current.weather[0].main, weather_chars), width = weather_chars ), format!( "{:>width$}°{}", w.current.temp.round(), UNIT_CHAR, width = temp_digits ), ), None => ( format!("{:>width$}", "WEATHER", width = weather_chars), format!("{:>width$}", "ERR", width = temp_digits + 2), ), } }
pub struct Console20x4Display<'a, T: LightSensor> { light_sensor: &'a T, } impl<'a, T: LightSensor> Console20x4Display<'a, T> { pub fn new(light_sensor: &'a T) -> Console20x4Display<'a, T> { Console20x4Display { light_sensor: light_sensor, } } } impl<'a, T: LightSensor> Display for Console20x4Display<'a, T> { fn print( &mut self, time: &DateTime<Local>, current_state_index: u32, weather: &Option<OpenWeather>, ) -> Result<(), Error> { let (weather_desc, temp_str) = console_weather_and_temp_str(&weather, 3, 14); let (high_temp_str, low_temp_str) = high_low_strs(&weather); // time is always 5 chars, date is always 10 chars let first_row = format!("{} {:>14}", console_time_str(&time), weather_desc); let second_row = format!("{} {:>9}", console_date_str(&time), temp_str); let third_row = format!("{:<20}", ""); let fourth_row = match current_state_index { 0 => format!("{:<20}", rain_forecast_str(&weather)), 1 => format!("{:<20}", high_temp_str,), 2 => format!("{:<20}", low_temp_str), _ => panic!("Invalid state index"), }; println!(); println!("-{}-", std::iter::repeat("-").take(20).collect::<String>()); println!("|{}|", first_row); println!("|{}|", second_row); println!("|{}|", third_row); println!("|{}|", fourth_row); println!("-{}-", std::iter::repeat("-").take(20).collect::<String>()); println!( "Current light: {}", self.light_sensor.read_light_normalized()? ); Ok(()) } } fn rain_forecast_str(weather: &Option<OpenWeather>) -> String { match weather { Some(w) => match next_rain_start_or_stop(&w) { Some(ts) => { if currently_raining(&w) { format!("Rain stops at {:02}:00", ts.hour()) } else { format!("Rain starts at {:02}:00", ts.hour()) } } None => { if currently_raining(&w) { "Rain for next 24h".to_string() } else { "No rain for next 24h".to_string() } } }, None => "".to_string(), } } fn high_low_strs(weather: &Option<OpenWeather>) -> (String, String) { match weather { Some(w) => { let ((high_time, high_temp), (low_time, low_temp)) = high_low_temp(&w); ( format!( "High: {}°F at {:02}:00", high_temp.round(), high_time.hour() ), format!("Low: {}°F at {:02}:00", low_temp.round(), low_time.hour()), ) } None => ("".to_string(), "".to_string()), } } fn mmm_from_time(time: &DateTime<Local>) -> String { Month::from_u32(time.month()) .expect("failed to parse month from datetime provided by operating system") .name()[0..3] .to_owned() } #[cfg(target_arch = "arm")] pub struct LCD16x2Display<'a, T: LightSensor> { lcd: HD44780< FourBitBus< linux_embedded_hal::Pin, linux_embedded_hal::Pin, linux_embedded_hal::Pin, linux_embedded_hal::Pin, linux_embedded_hal::Pin, linux_embedded_hal::Pin, >, >, brightness_pwm: Pwm, light_sensor: &'a T, } #[cfg(target_arch = "arm")] impl<'a, T: LightSensor> LCD16x2Display<'a, T> { pub fn new(light_sensor: &'a T) -> Result<Self, Error> { // Using BCM numbers // i.e. 
pin 0 corresponds to wiringpi 30 and physical 27 let rs = Pin::new(21); let en = Pin::new(20); let db4 = Pin::new(26); let db5 = Pin::new(13); let db6 = Pin::new(6); let db7 = Pin::new(5); let r = Pin::new(17); let g = Pin::new(16); let b = Pin::new(19); let default_brightness = 1.0; // pwm0 is pin 18 let pwm0 = Pwm::with_frequency( Channel::Pwm0, 20000.0, default_brightness, Polarity::Normal, false, )?; pwm0.enable()?; rs.export()?; en.export()?; db4.export()?; db5.export()?; db6.export()?; db7.export()?; r.export()?; g.export()?; b.export()?; rs.set_direction(Direction::Low)?; en.set_direction(Direction::Low)?; db4.set_direction(Direction::Low)?; db5.set_direction(Direction::Low)?; db6.set_direction(Direction::Low)?; db7.set_direction(Direction::Low)?; r.set_direction(Direction::Low)?; // Default to red on; green and blue off g.set_direction(Direction::High)?; b.set_direction(Direction::High)?; let mut lcd = HD44780::new_4bit(rs, en, db4, db5, db6, db7, &mut Delay)?; lcd.reset(&mut Delay)?; lcd.clear(&mut Delay)?; lcd.set_display_mode( DisplayMode { display: HD44780DisplaySetting::On, cursor_visibility: Cursor::Invisible, cursor_blink: CursorBlink::Off, }, &mut Delay, )?; Ok(LCD16x2Display { lcd: lcd, brightness_pwm: pwm0, light_sensor: light_sensor, }) } fn set_brightness(&mut self, brightness: f32) -> Result<(), Error> { debug!("Brightness: {}", brightness); self.brightness_pwm.set_duty_cycle(brightness as f64)?; Ok(()) } } #[cfg(target_arch = "arm")] impl<'a, T: LightSensor> Display for LCD16x2Display<'a, T> { fn print( &mut self, time: &DateTime<Local>, _: u32, weather: &Option<OpenWeather>, ) -> Result<(), Error> { let (weather_desc, temp_str) = console_weather_and_temp_str(&weather, 3, 14); // time is always 5 chars, date is always 10 chars let first_row = format!("{} {:>14}", console_time_str(&time), weather_desc); let second_row = format!("{} {:>9}", console_date_str(&time), temp_str); // Move to beginning of first row. self.lcd.reset(&mut Delay)?; self.lcd .write_bytes(&str_to_lcd_bytes(&first_row), &mut Delay)?; // Move to line 2 self.lcd.set_cursor_pos(0x40, &mut Delay)?; self.lcd .write_bytes(&str_to_lcd_bytes(&second_row), &mut Delay)?; let brightness = self.light_sensor.read_light_normalized()?; let min_brightness = 0.01; let brightness = brightness.max(min_brightness); self.set_brightness(brightness)?; Ok(()) } } #[cfg(target_arch = "arm")] pub struct LCD20x4Display<'a, T: LightSensor> { lcd: HD44780< FourBitBus< linux_embedded_hal::Pin, linux_embedded_hal::Pin, linux_embedded_hal::Pin, linux_embedded_hal::Pin, linux_embedded_hal::Pin, linux_embedded_hal::Pin, >, >, brightness_pwm: Pwm, light_sensor: &'a T, } #[cfg(target_arch = "arm")] impl<'a, T: LightSensor> LCD20x4Display<'a, T> { pub fn new(light_sensor: &'a T) -> Result<Self, Error> { // Using BCM numbers // i.e. 
pin 0 corresponds to wiringpi 30 and physical 27 let rs = Pin::new(21); let en = Pin::new(20); let db4 = Pin::new(19); // prev: 26 let db5 = Pin::new(13); let db6 = Pin::new(6); let db7 = Pin::new(5); let r = Pin::new(17); // let g = Pin::new(16); // let b = Pin::new(19); let default_brightness = 1.0; // pwm0 is pin 18 let pwm0 = Pwm::with_frequency( Channel::Pwm0, 20000.0, default_brightness, Polarity::Normal, false, )?; pwm0.enable()?; rs.export()?; en.export()?; db4.export()?; db5.export()?; db6.export()?; db7.export()?; r.export()?; // g.export()?; // b.export()?; rs.set_direction(Direction::Low)?; en.set_direction(Direction::Low)?; db4.set_direction(Direction::Low)?; db5.set_direction(Direction::Low)?; db6.set_direction(Direction::Low)?; db7.set_direction(Direction::Low)?; r.set_direction(Direction::Low)?; // Default to red on; green and blue off // g.set_direction(Direction::High)?; // b.set_direction(Direction::High)?; let mut lcd = HD44780::new_4bit(rs, en, db4, db5, db6, db7, &mut Delay)?; lcd.reset(&mut Delay)?; lcd.clear(&mut Delay)?; lcd.set_display_mode( DisplayMode { display: HD44780DisplaySetting::On, cursor_visibility: Cursor::Invisible, cursor_blink: CursorBlink::Off, }, &mut Delay, )?; Ok(LCD20x4Display { lcd: lcd, brightness_pwm: pwm0, light_sensor: light_sensor, }) } fn set_brightness(&mut self, brightness: f32) -> Result<(), Error> { debug!("Brightness: {}", brightness); self.brightness_pwm.set_duty_cycle(brightness as f64)?; Ok(()) } } #[cfg(target_arch = "arm")] impl<'a, T: LightSensor> Display for LCD20x4Display<'a, T> { fn print( &mut self, time: &DateTime<Local>, current_state_index: u32, weather: &Option<OpenWeather>, ) -> Result<(), Error> { let (weather_desc, temp_str) = console_weather_and_temp_str(&weather, 3, 14); let (high_temp_str, low_temp_str) = high_low_strs(&weather); // time is always 5 chars, date is always 10 chars let first_row = format!("{} {:>14}", console_time_str(&time), weather_desc); let second_row = format!("{} {:>9}", console_date_str(&time), temp_str); let third_row = ""; let fourth_row = match current_state_index { 0 => format!("{:<20}", rain_forecast_str(&weather)), 1 => format!("{:<20}", high_temp_str), 2 => format!("{:<20}", low_temp_str), _ => panic!("Invalid state index"), }; // Move to beginning of first row. 
self.lcd.reset(&mut Delay)?; self.lcd .write_bytes(&str_to_lcd_bytes(&first_row), &mut Delay)?; // Move to line 2 self.lcd.set_cursor_pos(0x40, &mut Delay)?; self.lcd .write_bytes(&str_to_lcd_bytes(&second_row), &mut Delay)?; // Move to line 3 self.lcd.set_cursor_pos(0x14, &mut Delay)?; self.lcd .write_bytes(&str_to_lcd_bytes(&third_row), &mut Delay)?; // Move to line 4 self.lcd.set_cursor_pos(0x54, &mut Delay)?; self.lcd .write_bytes(&str_to_lcd_bytes(&fourth_row), &mut Delay)?; let brightness = self.light_sensor.read_light_normalized()?; let min_brightness = 0.01; let brightness = brightness.max(min_brightness); self.set_brightness(brightness)?; Ok(()) } } fn str_to_lcd_bytes(s: &str) -> Vec<u8> { s.replace("°", "#") // Pick a character that we know won't appear in the string elsewhere .as_bytes() .iter() .map(|&i| if i == '#' as u8 { 0xDF } else { i }) // 0xDF is the bytecode for the ° symbol .collect::<Vec<u8>>() } #[cfg(target_arch = "arm")] pub struct ILI9341Display<'a, T: LightSensor> { display: Ili9341< display_interface_spi::SPIInterface<Spi, linux_embedded_hal::Pin, linux_embedded_hal::Pin>, linux_embedded_hal::Pin, >, brightness_pwm: Pwm, light_sensor: &'a T, } #[cfg(target_arch = "arm")] impl<'a, T: LightSensor> ILI9341Display<'a, T> { pub fn new(light_sensor: &'a T) -> Result<Self, Error> { // Using BCM numbers // i.e. pin 0 corresponds to wiringpi 30 and physical 27 let default_brightness = 1.0; // pwm0 is pin 18 let pwm0 = Pwm::with_frequency( Channel::Pwm0, 20000.0, default_brightness, Polarity::Normal, false, )?; pwm0.enable()?; let rs = Pin::new(24); rs.export()?; rs.set_direction(Direction::Low)?; let cs = Pin::new(21); // TODO: can't use the CE0 pin in the display as it is already used by the SPI variable. cs.export()?; cs.set_direction(Direction::Low)?; let dc = Pin::new(25); dc.export()?; dc.set_direction(Direction::Low)?; let spi = Spi::new(Bus::Spi0, SlaveSelect::Ss0, 16_000_000, Mode::Mode0)?; let spi_di = display_interface_spi::SPIInterface::new(spi, dc, cs); let mut display = Ili9341::new(spi_di, rs, &mut Delay)?; display.set_orientation(Orientation::LandscapeFlipped)?; Ok(ILI9341Display { display: display, brightness_pwm: pwm0, light_sensor: light_sensor, }) } fn set_brightness(&mut self, brightness: f32) -> Result<(), Error> { debug!("LED brightness: {}", brightness); self.brightness_pwm.set_duty_cycle(brightness as f64)?; Ok(()) } } #[cfg(target_arch = "arm")] impl<'a, T: LightSensor> Display for ILI9341Display<'a, T> { fn print( &mut self, time: &DateTime<Local>, _: u32, weather: &Option<OpenWeather>, ) -> Result<(), Error> { let day = &time.weekday().to_string()[0..3]; let month = &mmm_from_time(time); let first_row = format!("{:02}:{:02}", time.hour(), time.minute()); let second_row = format!("{} {} {:<2}", day, month, time.day()); let (third_row, fourth_row) = match weather { Some(w) => ( format!("{}", truncate_to_characters(&w.current.weather[0].main, 7)), format!("{:>3}°{}", &w.current.temp.round(), UNIT_CHAR), ), None => ("WEATHER".to_owned(), "ERR".to_owned()), }; let text = format!("{}\n{}\n{}", second_row, third_row, fourth_row); let background = egrectangle!( top_left = (0, 0), bottom_right = (320, 240), style = primitive_style!(fill_color = Rgb565::BLACK), ); let time_text = egtext!( text = &first_row, top_left = (20, 16), style = text_style!(font = Font24x32, text_color = Rgb565::RED), ); let other_text = egtext!( text = &text, top_left = (20, 48), style = text_style!(font = Font12x16, text_color = Rgb565::RED), ); background.draw(&mut 
self.display)?; time_text.draw(&mut self.display)?; other_text.draw(&mut self.display)?; let brightness = self.light_sensor.read_light_normalized()?; let min_brightness = 0.01; let brightness = brightness.max(min_brightness); self.set_brightness(brightness)?; Ok(()) } } #[cfg(target_arch = "arm")] pub struct AlphaNum4Display<'a, T: LightSensor> { ht16k33: HT16K33<I2c>, light_sensor: &'a T, } #[cfg(target_arch = "arm")] impl<'a, T: LightSensor> AlphaNum4Display<'a, T> { pub fn new(light_sensor: &'a T) -> Result<Self, Error> { // The I2C device address. let address = 0x71; // Create an I2C device. let mut i2c = I2c::new()?; i2c.set_slave_address(address as u16)?; let mut ht16k33 = HT16K33::new(i2c, address); ht16k33.initialize()?; ht16k33.set_display(ht16k33::Display::ON)?; Ok(AlphaNum4Display { ht16k33: ht16k33, light_sensor: light_sensor, }) } fn set_brightness(&mut self, brightness: f32) -> Result<(), Error> { let level = (brightness * 15.0).round() as u8; let dimming = ht16k33::Dimming::from_u8(level)?; debug!( "Current light level: {}, dimming level: {}/16", brightness, level ); self.ht16k33.set_dimming(dimming)?; Ok(()) } } #[cfg(target_arch = "arm")] impl<'a, T: LightSensor> Display for AlphaNum4Display<'a, T> { fn print( &mut self, _: &DateTime<Local>, _: u32, weather: &Option<OpenWeather>, ) -> Result<(), Error> { let [d1, d2, d3] = match weather { Some(w) => { let chars = format!("{:>3}", w.current.temp.round()) .chars() .collect::<Vec<char>>(); [chars[0], chars[1], chars[2]] } None => ['E', 'R', 'R'], }; let d4 = match weather { Some(_) => UNIT_CHAR, None => ' ', }; adafruit_alphanum4::AlphaNum4::update_buffer_with_char( &mut self.ht16k33, adafruit_alphanum4::Index::One, adafruit_alphanum4::AsciiChar::new(d1), ); adafruit_alphanum4::AlphaNum4::update_buffer_with_char( &mut self.ht16k33, adafruit_alphanum4::Index::Two, adafruit_alphanum4::AsciiChar::new(d2), ); adafruit_alphanum4::AlphaNum4::update_buffer_with_char( &mut self.ht16k33, adafruit_alphanum4::Index::Three, adafruit_alphanum4::AsciiChar::new(d3), ); adafruit_alphanum4::AlphaNum4::update_buffer_with_char( &mut self.ht16k33, adafruit_alphanum4::Index::Four, adafruit_alphanum4::AsciiChar::new(d4), ); self.ht16k33.write_display_buffer()?; let brightness = self.light_sensor.read_light_normalized()?; self.set_brightness(brightness)?; Ok(()) } } #[cfg(target_arch = "arm")] pub struct SevenSegment4Display<'a, T: LightSensor> { ht16k33: HT16K33<I2c>, light_sensor: &'a T, } #[cfg(target_arch = "arm")] impl<'a, T: LightSensor> SevenSegment4Display<'a, T> { pub fn new(light_sensor: &'a T) -> Result<Self, Error> { // The I2C device address. let address = 0x70; // Create an I2C device. 
let mut i2c = I2c::new()?; i2c.set_slave_address(address as u16)?; let mut ht16k33 = HT16K33::new(i2c, address); ht16k33.initialize()?; ht16k33.set_display(ht16k33::Display::ON)?; Ok(SevenSegment4Display { ht16k33: ht16k33, light_sensor: light_sensor, }) } fn set_brightness(&mut self, brightness: f32) -> Result<(), Error> { let level = (brightness * 15.0).round() as u8; let dimming = ht16k33::Dimming::from_u8(level)?; debug!("Brightness: {}, dimming level: {}/16", brightness, level); self.ht16k33.set_dimming(dimming)?; Ok(()) } } #[cfg(target_arch = "arm")] impl<'a, T: LightSensor> Display for SevenSegment4Display<'a, T> { fn print( &mut self, time: &DateTime<Local>, _: u32, _: &Option<OpenWeather>, ) -> Result<(), Error> { let [d1, d2, d3, d4] = split_time(time); adafruit_7segment::SevenSegment::update_buffer_with_digit( &mut self.ht16k33, adafruit_7segment::Index::One, d1, ); adafruit_7segment::SevenSegment::update_buffer_with_digit( &mut self.ht16k33, adafruit_7segment::Index::Two, d2, ); adafruit_7segment::SevenSegment::update_buffer_with_digit( &mut self.ht16k33, adafruit_7segment::Index::Three, d3, ); adafruit_7segment::SevenSegment::update_buffer_with_digit( &mut self.ht16k33, adafruit_7segment::Index::Four, d4, ); adafruit_7segment::SevenSegment::update_buffer_with_colon(&mut self.ht16k33, true); self.ht16k33.write_display_buffer()?; let brightness = self.light_sensor.read_light_normalized()?; self.set_brightness(brightness)?; Ok(()) } } fn split_time(t: &DateTime<Local>) -> [u8; 4] { let hour = t.hour(); let minute = t.minute(); let d4 = (minute % 10) as u8; let d3 = (minute / 10) as u8 % 10; let d2 = (hour % 10) as u8; let d1 = (hour / 10) as u8 % 10; [d1, d2, d3, d4] } fn truncate_to_characters(s: &str, length: usize) -> String { if s.len() <= length { return s.to_owned(); } format!("{}'{}", &s[0..1], &s[s.len() - length + 2..s.len()]) } #[cfg(test)] mod tests { use super::*; #[test] fn test_truncate_to_characters() { assert_eq!(truncate_to_characters("", 3), ""); assert_eq!(truncate_to_characters("a", 3), "a"); assert_eq!(truncate_to_characters("ab", 3), "ab"); assert_eq!(truncate_to_characters("abc", 3), "abc"); assert_eq!(truncate_to_characters("abcd", 3), "a'd"); assert_eq!(truncate_to_characters("abcdefg", 5), "a'efg"); assert_eq!(truncate_to_characters("Tornado", 7), "Tornado"); assert_eq!(truncate_to_characters("Thunderstorm", 7), "T'storm"); } #[test] fn test_split_time() -> Result<(), Box<dyn std::error::Error>> { assert_eq!( split_time(&Local::now().with_hour(1).unwrap().with_minute(3).unwrap()), [0, 1, 0, 3] ); assert_eq!( split_time(&Local::now().with_hour(0).unwrap().with_minute(0).unwrap()), [0, 0, 0, 0] ); assert_eq!( split_time(&Local::now().with_hour(12).unwrap().with_minute(34).unwrap()), [1, 2, 3, 4] ); assert_eq!( split_time(&Local::now().with_hour(23).unwrap().with_minute(59).unwrap()), [2, 3, 5, 9] ); Ok(()) } }
expect_column_values_to_be_null.py
from typing import Dict, Optional from great_expectations.core import ExpectationConfiguration from great_expectations.core.expectation_configuration import parse_result_format from great_expectations.execution_engine import ExecutionEngine from great_expectations.expectations.expectation import ( ColumnMapExpectation, _format_map_output, ) from great_expectations.expectations.util import render_evaluation_parameter_string from great_expectations.render.renderer.renderer import renderer from great_expectations.render.types import RenderedStringTemplateContent from great_expectations.render.util import ( num_to_str, parse_row_condition_string_pandas_engine, substitute_none_for_missing, ) from great_expectations.validator.validation_graph import MetricConfiguration class ExpectColumnValuesToBeNull(ColumnMapExpectation): """Expect column values to be null. expect_column_values_to_be_null is a \ :func:`column_map_expectation <great_expectations.execution_engine.execution_engine.MetaExecutionEngine .column_map_expectation>`. Args: column (str): \ The column name. Keyword Args: mostly (None or a float between 0 and 1): \ Return `"success": True` if at least mostly fraction of values match the expectation. \ For more detail, see :ref:`mostly`. Other Parameters: result_format (str or None): \ Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`. For more detail, see :ref:`result_format <result_format>`. include_config (boolean): \ If True, then include the expectation config as part of the result object. \ For more detail, see :ref:`include_config`. catch_exceptions (boolean or None): \ If True, then catch exceptions and include them as part of the result object. \ For more detail, see :ref:`catch_exceptions`. meta (dict or None): \ A JSON-serializable dictionary (nesting allowed) that will be included in the output without \ modification. For more detail, see :ref:`meta`. Returns: An ExpectationSuiteValidationResult Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and :ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`. See Also: :func:`expect_column_values_to_not_be_null \ <great_expectations.execution_engine.execution_engine.ExecutionEngine.expect_column_values_to_not_be_null>` """ # This dictionary contains metadata for display in the public gallery library_metadata = { "maturity": "production", "package": "great_expectations", "tags": ["core expectation", "column map expectation"], "contributors": ["@great_expectations"], "requirements": [], } map_metric = "column_values.null" @classmethod @renderer(renderer_type="renderer.prescriptive") @render_evaluation_parameter_string def _prescriptive_renderer( cls, configuration=None, result=None, language=None, runtime_configuration=None, **kwargs ): runtime_configuration = runtime_configuration or {} include_column_name = runtime_configuration.get("include_column_name", True) include_column_name = ( include_column_name if include_column_name is not None else True ) styling = runtime_configuration.get("styling") params = substitute_none_for_missing( configuration.kwargs, ["column", "mostly", "row_condition", "condition_parser"], ) if params["mostly"] is not None: params["mostly_pct"] = num_to_str( params["mostly"] * 100, precision=15, no_scientific=True ) # params["mostly_pct"] = "{:.14f}".format(params["mostly"]*100).rstrip("0").rstrip(".") template_str = "values must be null, at least $mostly_pct % of the time." else: template_str = "values must be null." 
if include_column_name: template_str = "$column " + template_str if params["row_condition"] is not None: ( conditional_template_str, conditional_params, ) = parse_row_condition_string_pandas_engine(params["row_condition"]) template_str = conditional_template_str + ", then " + template_str params.update(conditional_params) return [ RenderedStringTemplateContent( **{ "content_block_type": "string_template", "string_template": { "template": template_str, "params": params, "styling": styling, }, } ) ] @classmethod @renderer(renderer_type="renderer.diagnostic.observed_value") def
_diagnostic_observed_value_renderer( cls, configuration=None, result=None, language=None, runtime_configuration=None, **kwargs ): result_dict = result.result try: notnull_percent = result_dict["unexpected_percent"] return ( num_to_str(100 - notnull_percent, precision=5, use_locale=True) + "% null" ) except KeyError: return "unknown % null" except TypeError: return "NaN% null" def get_validation_dependencies( self, configuration: Optional[ExpectationConfiguration] = None, execution_engine: Optional[ExecutionEngine] = None, runtime_configuration: Optional[dict] = None, ): dependencies = super().get_validation_dependencies( configuration, execution_engine, runtime_configuration ) # We do not need this metric for a null metric del dependencies["metrics"]["column_values.nonnull.unexpected_count"] return dependencies def _validate( self, configuration: ExpectationConfiguration, metrics: Dict, runtime_configuration: dict = None, execution_engine: ExecutionEngine = None, ): if runtime_configuration: result_format = runtime_configuration.get( "result_format", configuration.kwargs.get( "result_format", self.default_kwarg_values.get("result_format") ), ) else: result_format = configuration.kwargs.get( "result_format", self.default_kwarg_values.get("result_format") ) mostly = self.get_success_kwargs().get( "mostly", self.default_kwarg_values.get("mostly") ) total_count = metrics.get("table.row_count") unexpected_count = metrics.get(self.map_metric + ".unexpected_count") if total_count is None or total_count == 0: # Vacuously true success = True else: success_ratio = (total_count - unexpected_count) / total_count success = success_ratio >= mostly nonnull_count = None return _format_map_output( result_format=parse_result_format(result_format), success=success, element_count=metrics.get("table.row_count"), nonnull_count=nonnull_count, unexpected_count=metrics.get(self.map_metric + ".unexpected_count"), unexpected_list=metrics.get(self.map_metric + ".unexpected_values"), unexpected_index_list=metrics.get( self.map_metric + ".unexpected_index_list" ), )
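A minimal invocation sketch for this expectation, assuming a configured Great Expectations Validator wired to a batch of data (the `validator` name and the column name below are placeholders):

# `validator` is assumed to be a configured great_expectations Validator.
result = validator.expect_column_values_to_be_null(
    column="cancellation_date",  # placeholder column
    mostly=0.95,                 # succeed if at least 95% of values are null
    result_format="SUMMARY",
)
print(result.success)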
test_keytool_parse.py
import pytest
class TestKeytoolParse: @staticmethod @pytest.mark.parametrize("printcert, correct_certs", [ ('Owner: CN=ca, OU=ca, O=ca, L=ca, ST=ca, C=CA\nIssuer: CN=root, OU=root, O=root, L=root, ST=root, C=CA\nSerial number: 5f822698\nValid from: Wed Apr 14 13:40:13 EDT 2021 until: Tue Jul 13 13:40:13 EDT 2021\nCertificate fingerprints:\n SHA1: 59:7C:A0:72:5D:98:9F:61:B9:9F:29:20:C8:73:60:9C:0E:02:EB:DF\n SHA256: AE:56:E7:5E:49:F2:1B:4B:FF:7A:76:12:6E:72:84:1C:6B:D3:E7:FA:D9:84:43:53:C7:24:A9:2F:3E:12:63:7F\nSignature algorithm name: SHA256withDSA\nSubject Public Key Algorithm: 2048-bit DSA key\nVersion: 3\n\nExtensions:\n\n#1: ObjectId: 2.5.29.35 Criticality=false\nAuthorityKeyIdentifier [\nKeyIdentifier [\n0000: 9D 76 79 BA 97 17 06 07 75 A6 5C E1 E6 98 09 F0 .vy.....u.\.....\n0010: D8 42 F6 C1 .B..\n]\n]\n\n#2: ObjectId: 2.5.29.19 Criticality=false\nBasicConstraints:[\n CA:true\n PathLen:0\n]\n\n#3: ObjectId: 2.5.29.14 Criticality=false\nSubjectKeyIdentifier [\nKeyIdentifier [\n0000: C2 BF E5 BF 85 2B ED 82 D2 F1 49 89 06 5B 5E 90 .....+....I..[^.\n0010: 64 FC C3 16 d...\n]\n]\n', [{'Owner': 'CN=ca, OU=ca, O=ca, L=ca, ST=ca, C=CA', 'Issuer': 'CN=root, OU=root, O=root, L=root, ST=root, C=CA', 'Country': 'CA', 'ValidFrom': 'Wed Apr 14 13:40:13 EDT 2021', 'ValidTo': 'Tue Jul 13 13:40:13 EDT 2021'}]), ('Certificate[1]:\nOwner: CN=server, OU=server, O=server, L=server, ST=server, C=CA\nIssuer: CN=ca, OU=ca, O=ca, L=ca, ST=ca, C=CA\nSerial number: 4e2d045a\nValid from: Wed Apr 14 13:42:22 EDT 2021 until: Tue Jul 13 13:42:22 EDT 2021\nCertificate fingerprints:\n SHA1: 0B:BE:A7:40:20:F4:F0:DE:D1:C8:99:26:32:A8:33:7A:EB:E8:87:70\n SHA256: 83:C1:8D:49:A4:98:3F:73:66:97:63:78:4C:E5:70:BF:0C:A2:71:4A:58:CE:B0:4E:65:87:39:F0:06:1F:7F:2C\nSignature algorithm name: SHA256withDSA\nSubject Public Key Algorithm: 2048-bit DSA key\nVersion: 3\n\nExtensions:\n\n#1: ObjectId: 2.5.29.35 Criticality=false\nAuthorityKeyIdentifier [\nKeyIdentifier [\n0000: C2 BF E5 BF 85 2B ED 82 D2 F1 49 89 06 5B 5E 90 .....+....I..[^.\n0010: 64 FC C3 16 d...\n]\n]\n\n#2: ObjectId: 2.5.29.15 Criticality=true\nKeyUsage [\n DigitalSignature\n Key_Encipherment\n]\n\n#3: ObjectId: 2.5.29.14 Criticality=false\nSubjectKeyIdentifier [\nKeyIdentifier [\n0000: 9B 06 D8 13 2E 6F 2F 62 85 66 42 A9 AC 86 2E A8 .....o/b.fB.....\n0010: 25 89 AB FC %...\n]\n]\n\n\nCertificate[2]:\nOwner: CN=ca, OU=ca, O=ca, L=ca, ST=ca, C=CA\nIssuer: CN=root, OU=root, O=root, L=root, ST=root, C=CA\nSerial number: 5f822698\nValid from: Wed Apr 14 13:40:13 EDT 2021 until: Tue Jul 13 13:40:13 EDT 2021\nCertificate fingerprints:\n SHA1: 59:7C:A0:72:5D:98:9F:61:B9:9F:29:20:C8:73:60:9C:0E:02:EB:DF\n SHA256: AE:56:E7:5E:49:F2:1B:4B:FF:7A:76:12:6E:72:84:1C:6B:D3:E7:FA:D9:84:43:53:C7:24:A9:2F:3E:12:63:7F\nSignature algorithm name: SHA256withDSA\nSubject Public Key Algorithm: 2048-bit DSA key\nVersion: 3\n\nExtensions:\n\n#1: ObjectId: 2.5.29.35 Criticality=false\nAuthorityKeyIdentifier [\nKeyIdentifier [\n0000: 9D 76 79 BA 97 17 06 07 75 A6 5C E1 E6 98 09 F0 .vy.....u.\.....\n0010: D8 42 F6 C1 .B..\n]\n]\n\n#2: ObjectId: 2.5.29.19 Criticality=false\nBasicConstraints:[\n CA:true\n PathLen:0\n]\n\n#3: ObjectId: 2.5.29.14 Criticality=false\nSubjectKeyIdentifier [\nKeyIdentifier [\n0000: C2 BF E5 BF 85 2B ED 82 D2 F1 49 89 06 5B 5E 90 .....+....I..[^.\n0010: 64 FC C3 16 d...\n]\n]\n', [{'Owner': 'CN=server, OU=server, O=server, L=server, ST=server, C=CA', 'Issuer': 'CN=ca, OU=ca, O=ca, L=ca, ST=ca, C=CA', 'Country': 'CA', 'ValidFrom': 'Wed Apr 14 13:42:22 EDT 2021', 'ValidTo': 'Tue 
Jul 13 13:42:22 EDT 2021'}, {'Owner': 'CN=ca, OU=ca, O=ca, L=ca, ST=ca, C=CA', 'Issuer': 'CN=root, OU=root, O=root, L=root, ST=root, C=CA', 'Country': 'CA', 'ValidFrom': 'Wed Apr 14 13:40:13 EDT 2021', 'ValidTo': 'Tue Jul 13 13:40:13 EDT 2021'}]), ] ) def test_certificate_chain_from_printcert(printcert, correct_certs): """ This function tests that a printcert output is properly parsed by certificate_chain_from_printcert. The certificates used come from running the commands in section 'Generate Certificates for an SSL Server' in the keytool docs: https://docs.oracle.com/javase/8/docs/technotes/tools/windows/keytool.html """ from assemblyline_v4_service.common.keytool_parse import certificate_chain_from_printcert certs = certificate_chain_from_printcert(printcert) assert len(certs) == len(correct_certs) for cert, correct in zip(certs, correct_certs): assert cert.country == correct['Country'] assert cert.issuer == correct['Issuer'] assert cert.owner == correct['Owner'] assert cert.valid_from == correct['ValidFrom'] assert cert.valid_to == correct['ValidTo']
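The printcert fixtures above can be regenerated from real certificates with the JDK's keytool; a small sketch of producing that input for the parser (cert.cer is a placeholder file):

import subprocess

# Assumes a JDK `keytool` on PATH; `-printcert -file` emits the text parsed
# by certificate_chain_from_printcert.
printcert = subprocess.run(
    ["keytool", "-printcert", "-file", "cert.cer"],
    capture_output=True, text=True, check=True,
).stdout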
lib.rs
mod chatter; pub mod command; mod credits; mod draw_system; mod game_object; mod game_object_type; mod interface; mod life_system; mod physics; mod running_state; mod splash; mod sprites; mod utilities; use chatter::Chatter; use command::Command; use credits::Credits; use draw_system::{DrawSystem, PlayerDrawSystem, TimerDrawSystem}; use game_object::GameObject; use game_object_type::GameObjectType; use ggez::audio; use ggez::audio::SoundSource; use ggez::event::EventHandler; use ggez::graphics::BLACK; use ggez::{graphics, timer, Context, GameResult}; use interface::Interface; use life_system::{LifeSystem, PlayerLifeSystem}; use physics::{PhysicsSystem, PlayerPhysics, TimerPhysicsSystem}; use running_state::RunningState; use splash::Splash; use sprites::Sprite; use std::{collections::HashMap, time::Duration}; use std::{ sync::mpsc::{channel, Receiver, Sender}, time::Instant, }; use twitch_chat_wrapper::chat_message::ChatMessage; pub const DROP_ZONE_COUNT: u8 = 10; const GAME_TIME: Duration = Duration::from_secs(120); pub const SPLASH_DURATION: Duration = Duration::from_secs(15); const LIVES: u8 = 3; const FRAMERATE_TARGET: u32 = 60; const SCORES_FILE_NAME: &str = "/high_scores"; pub struct GameState { send_to_chat: Sender<String>, receive_from_chat: Receiver<ChatMessage>, screen_size: (f32, f32), interface: Interface, game_objects: Vec<GameObject>, player_hit_object_event: Receiver<Chatter>, running_state: RunningState, credits: Option<Credits>, splash: Splash, game_start_time: Instant, object_sound: audio::Source, scores: HashMap<String, u128>, } impl GameState { pub fn new( send_to_chat: Sender<String>, receive_from_chat: Receiver<ChatMessage>, screen_size: (f32, f32), context: &mut Context, ) -> GameResult<GameState> { let game_started_message = format!("In {} seconds the Get the Streamer game will begin, you can play through chat with the commands on the right side of the game.", SPLASH_DURATION.as_secs()); send_to_chat.send(game_started_message).unwrap(); let mut interface = Interface::new(context, screen_size, LIVES)?; // create timer block let timer_game_object = Self::create_timer( screen_size, context, interface.width, SPLASH_DURATION, (0.0, 1.0, 0.0), )?; interface.add_game_object(timer_game_object); // create player let player_scale = 4.0; let player_forward_sprite = Sprite::new(context, "/player_forward.png", 8, 1)?; let player_left_sprite = Sprite::new(context, "/player_left.png", 8, 1)?; let player_draw_system = PlayerDrawSystem::new(player_left_sprite, player_forward_sprite, player_scale); let player_size = player_draw_system.get_size().unwrap_or((50.0, 50.0)); let (send_player_hit_object_event, receive_player_hit_object_event) = channel(); let player_physics_system = PlayerPhysics::new(context, send_player_hit_object_event); let player = GameObject::new( 250.0, 250.0, Some(Box::new(player_draw_system)), player_size.0, player_size.1, Some(Box::new(player_physics_system)), true, None, GameObjectType::Player, Some(Box::new(PlayerLifeSystem::new())), ); let game_objects = vec![player]; let splash = Splash::new( (screen_size.0 - interface.width, screen_size.1), context, SPLASH_DURATION, ); let game_start_time = Instant::now(); Ok(GameState { send_to_chat, receive_from_chat, screen_size, interface, game_objects, player_hit_object_event: receive_player_hit_object_event, running_state: RunningState::StartingSoon, credits: None, splash, game_start_time, object_sound: audio::Source::new(context, "/threeTone1.ogg").unwrap(), scores: HashMap::new(), }) } fn handle_command( &mut self, 
command: Option<Command>, context: &mut Context, ) -> GameResult<()> { if let Some(command) = command { let chatter = command.chatter.clone(); self.object_sound.play().unwrap(); self.game_objects.push(command.handle( self.interface.get_column_coordinates_by_index(command.id), context, )?); let score = self.scores.entry(chatter.name).or_insert(0); *score += 1; } Ok(()) } fn get_player(&self) -> Option<&GameObject> { self.game_objects .iter() .find(|game_object| game_object.my_type == GameObjectType::Player) } fn create_timer( screen_size: (f32, f32), context: &mut Context, interface_width: f32, duration: Duration, color: (f32, f32, f32), ) -> GameResult<GameObject> { let timer_draw_system = TimerDrawSystem::new(screen_size, context, color)?; let timer_size = timer_draw_system.get_size().unwrap_or((5.0, screen_size.1)); let timer_physics_system = TimerPhysicsSystem::new(timer_size.1, duration, FRAMERATE_TARGET as f32); let timer_game_object = GameObject::new( screen_size.0 - interface_width, 0.0, Some(Box::new(timer_draw_system)), timer_size.0, timer_size.1, Some(Box::new(timer_physics_system)), false, None, GameObjectType::Interface, None, ); Ok(timer_game_object) } fn
(&self, high_scores: &mut HashMap<String, u128>) {
        for (username, score) in &self.scores {
            let high_score = high_scores.entry(username.to_owned()).or_insert(0);
            *high_score += *score;
        }
    }

    fn send_game_started_message(&self) {
        let message = format!(
            "You have {} seconds to send your commands to Get the Streamer!",
            GAME_TIME.as_secs()
        );
        if let Err(error) = self.send_to_chat.send(message) {
            eprintln!("error sending game started message to chat: {}", error);
        }
    }

    fn send_game_ended_message(&self, winner: RunningState) {
        let (highest_scorer, score) = self
            .get_highest_scorer()
            .unwrap_or_else(|| ("nobody".to_owned(), 0));
        let message = match winner {
            RunningState::ChatWon => format!(
                "You all won, highest scorer was {} with {} points!",
                highest_scorer, score
            ),
            _ => format!(
                "The Streamer won the game despite the best efforts of {} who got {} points!",
                highest_scorer, score
            ),
        };
        if let Err(error) = self.send_to_chat.send(message) {
            eprintln!("error sending game ended message to chat: {}", error);
        }
    }

    fn end_game(&mut self, new_running_state: RunningState) {
        self.send_game_ended_message(new_running_state);
        self.running_state = new_running_state;
    }

    fn get_highest_scorer(&self) -> Option<(String, u128)> {
        self.scores
            .iter()
            .max_by(|a, b| a.1.cmp(b.1))
            .map(|scorer| (scorer.0.to_owned(), *scorer.1))
    }
}

impl EventHandler for GameState {
    fn update(&mut self, context: &mut Context) -> GameResult {
        if let Ok(chat_message) = self.receive_from_chat.try_recv() {
            if matches!(self.running_state, RunningState::Playing) {
                let chatter_name = if let Some(display_name) = chat_message.display_name {
                    display_name
                } else {
                    chat_message.name.clone()
                };
                match Command::new(
                    &chat_message.message,
                    Chatter::new(
                        chatter_name,
                        chat_message.color_rgb,
                        chat_message.subscriber,
                    ),
                ) {
                    Err(error) => self.send_to_chat.send(error.to_owned()).unwrap(),
                    Ok(command) => self.handle_command(command, context)?,
                }
            }
        }

        while timer::check_update_time(context, FRAMERATE_TARGET) {
            match self.running_state {
                RunningState::StartingSoon => {
                    if let Err(error) = self.interface.update(context, LIVES) {
                        eprintln!("Error updating game objects in interface: {}", error);
                    }
                    if self.splash.is_done() {
                        self.send_game_started_message();
                        self.running_state = RunningState::Playing;
                        let timer = Self::create_timer(
                            self.screen_size,
                            context,
                            self.interface.width,
                            GAME_TIME,
                            (1.0, 0.0, 0.0),
                        )?;
                        self.interface.add_game_object(timer);
                        self.game_start_time = Instant::now();
                    }
                }
                RunningState::Playing => {
                    // get the player's remaining lives, or zero if the player is gone
                    let lives_left = if let Some(player) = self.get_player() {
                        player.get_lives_left().unwrap_or(LIVES)
                    } else {
                        0
                    };
                    if let Err(error) = self.interface.update(context, lives_left) {
                        eprintln!("Error updating game objects in interface: {}", error);
                    }
                    // saturating_sub avoids an integer-underflow panic if a
                    // stalled frame lets elapsed time overshoot GAME_TIME
                    let game_time_left = GAME_TIME
                        .as_secs()
                        .saturating_sub(self.game_start_time.elapsed().as_secs());
                    if game_time_left == 0 {
                        self.end_game(RunningState::PlayerWon);
                    }
                    let arena_size = (
                        self.screen_size.0 - self.interface.width,
                        self.screen_size.1,
                    );
                    let collidable_game_objects: Vec<GameObject> = self
                        .game_objects
                        .clone()
                        .into_iter()
                        .filter(|game_object| game_object.collidable)
                        .collect();

                    self.game_objects.iter_mut().for_each(|game_object| {
                        if let Err(error) = game_object.update(
                            timer::time_since_start(context),
                            arena_size,
                            context,
                            &collidable_game_objects,
                        ) {
                            eprintln!("error running update: {}", error)
                        }
                    });

                    self.game_objects
                        .retain(|game_object| game_object.is_alive());

                    if let Ok(chatter) = self.player_hit_object_event.try_recv() {
                        let message_to_chat =
                            format!("Hit! {} gets 10 points", &chatter.name);
                        self.send_to_chat.send(message_to_chat).unwrap();
                        let score = self.scores.entry(chatter.name).or_insert(0);
                        *score += 10;
                    }

                    // the chat wins as soon as no player object remains
                    if !self
                        .game_objects
                        .iter()
                        .any(|game_object| game_object.my_type == GameObjectType::Player)
                    {
                        self.end_game(RunningState::ChatWon);
                    }
                }
                RunningState::ChatWon | RunningState::PlayerWon => {
                    if let Some(credits) = &mut self.credits {
                        if !credits.update() {
                            ggez::event::quit(context);
                        }
                    } else {
                        let mut high_scores = utilities::load_scores(SCORES_FILE_NAME, context);
                        self.update_scores(&mut high_scores);
                        if let Err(error) =
                            utilities::save_scores(context, SCORES_FILE_NAME, &high_scores)
                        {
                            eprintln!("Error saving high scores to disk: {}", error);
                        }
                        self.credits = Some(Credits::new(
                            self.running_state,
                            context,
                            self.screen_size,
                            &high_scores,
                            &self.scores,
                        )?);
                    }
                }
            }
        }

        Ok(())
    }

    fn draw(&mut self, context: &mut Context) -> GameResult {
        graphics::clear(context, BLACK);
        self.interface
            .draw(context, self.screen_size, &self.running_state)?;

        match self.running_state {
            RunningState::StartingSoon => self.splash.draw(context)?,
            RunningState::Playing => {
                for game_object in self.game_objects.iter() {
                    game_object.draw(context)?;
                }
            }
            RunningState::PlayerWon | RunningState::ChatWon => {
                if let Some(credits) = &self.credits {
                    credits.draw(context)?;
                }
            }
        }

        graphics::present(context)
    }
}
update_scores
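Joining this row's pieces, the prefix's trailing `fn `, the middle `update_scores`, and the suffix's opening parameter list reassemble into the method below; the body is taken verbatim from the suffix and folds the session's scores into the persistent high-score table.

// Reassembled from the prefix + middle + suffix cells of the lib.rs row above.
fn update_scores(&self, high_scores: &mut HashMap<String, u128>) {
    for (username, score) in &self.scores {
        // accumulate this session's points onto the stored total
        let high_score = high_scores.entry(username.to_owned()).or_insert(0);
        *high_score += *score;
    }
}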
cmdline.py
#!/usr/bin/env python # cardinal_pythonlib/cmdline.py """ =============================================================================== Original code copyright (C) 2009-2021 Rudolf Cardinal ([email protected]). This file is part of cardinal_pythonlib. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============================================================================== **Functions for manipulating command-line parameters.** """ import re # import shlex import subprocess import sys from typing import List, Union def
(s: str, platform: Union[int, str] = 'this') -> List[str]:
    """
    As per
    https://stackoverflow.com/questions/33560364/python-windows-parsing-command-lines-with-shlex.

    Multi-platform variant of ``shlex.split()`` for command-line splitting.
    For use with ``subprocess``, for ``argv`` injection etc. Uses a fast
    regex.

    Args:
        s: string to split
        platform:

            - ``'this'`` = auto from current platform;
            - ``1`` = POSIX;
            - ``0`` = Windows/CMD
            - (other values reserved)
    """  # noqa
    if platform == 'this':
        platform = (sys.platform != 'win32')  # RNC: includes 64-bit Windows
    if platform == 1:  # POSIX
        re_cmd_lex = r'''"((?:\\["\\]|[^"])*)"|'([^']*)'|(\\.)|(&&?|\|\|?|\d?\>|[<])|([^\s'"\\&|<>]+)|(\s+)|(.)'''  # noqa
    elif platform == 0:  # Windows/CMD
        re_cmd_lex = r'''"((?:""|\\["\\]|[^"])*)"?()|(\\\\(?=\\*")|\\")|(&&?|\|\|?|\d?>|[<])|([^\s"&|<>]+)|(\s+)|(.)'''  # noqa
    else:
        raise AssertionError(f"unknown platform {platform!r}")

    args = []
    accu = None  # collects pieces of one arg
    for qs, qss, esc, pipe, word, white, fail in re.findall(re_cmd_lex, s):
        if word:
            pass  # most frequent
        elif esc:
            word = esc[1]
        elif white or pipe:
            if accu is not None:
                args.append(accu)
            if pipe:
                args.append(pipe)
            accu = None
            continue
        elif fail:
            raise ValueError("invalid or incomplete shell string")
        elif qs:
            word = qs.replace(r'\"', '"').replace(r'\\', '\\')
            # ... raw strings can't end in single backslashes;
            # https://stackoverflow.com/questions/647769/why-cant-pythons-raw-string-literals-end-with-a-single-backslash  # noqa
            if platform == 0:
                word = word.replace('""', '"')
        else:
            word = qss  # may be even empty; must be last

        accu = (accu or '') + word

    if accu is not None:
        args.append(accu)

    return args


def cmdline_quote_posix(seq: List[str]) -> str:
    """
    Quotes arguments for POSIX, producing a single string suitable for
    copying/pasting.

    Based on subprocess.list2cmdline().
    """
    result = []  # type: List[str]
    for arg in seq:
        bs_buf = []  # type: List[str]

        # Add a space to separate this argument from the others
        if result:
            result.append(' ')

        # Modified here: quote arguments with "*"
        needquote = (" " in arg) or ("\t" in arg) or ("*" in arg) or not arg
        if needquote:
            result.append('"')

        for c in arg:
            if c == '\\':
                # Don't know if we need to double yet.
                bs_buf.append(c)
            elif c == '"':
                # Double backslashes.
                result.append('\\' * len(bs_buf) * 2)
                bs_buf = []
                result.append('\\"')
            else:
                # Normal char
                if bs_buf:
                    result.extend(bs_buf)
                    bs_buf = []
                result.append(c)

        # Add remaining backslashes, if any.
        if bs_buf:
            result.extend(bs_buf)

        if needquote:
            result.extend(bs_buf)
            result.append('"')

    return ''.join(result)


def cmdline_quote(args: List[str],
                  platform: Union[int, str] = 'this') -> str:
    """
    Convert a list of command-line arguments to a suitably quoted
    command-line string that should be copy/pastable into a command prompt.
    """
    if platform == 'this':
        platform = (sys.platform != 'win32')  # RNC: includes 64-bit Windows
    if platform == 1:  # POSIX
        return cmdline_quote_posix(args)
    elif platform == 0:  # Windows/CMD
        return subprocess.list2cmdline(args)
    else:
        raise AssertionError(f"unknown platform {platform!r}")
cmdline_split
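Reassembled, this row yields the full signature `def cmdline_split(s, platform='this')`. A short usage sketch follows; the expected output is inferred from the POSIX regex branch rather than quoted from the source, so treat it as an assumption.

# Inferred behaviour of the reassembled cmdline_split on the POSIX branch:
# quoted spans stay single tokens and shell operators split out on their own.
from cardinal_pythonlib.cmdline import cmdline_split

print(cmdline_split('echo "hello world" && ls', platform=1))
# expected: ['echo', 'hello world', '&&', 'ls']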
azuremachineproviderconfig_types.go
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// AzureMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field
// for an Azure virtual machine. It is used by the Azure machine actuator to create a single Machine.
// Required parameters such as location that are not specified by this configuration will be
// defaulted by the actuator.
// TODO: Update type
// +k8s:openapi-gen=true
type AzureMachineProviderSpec struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	// UserDataSecret contains a local reference to a secret that contains the
	// UserData to apply to the instance.
	UserDataSecret *corev1.SecretReference `json:"userDataSecret,omitempty"`

	// CredentialsSecret is a reference to the secret with Azure credentials.
	CredentialsSecret *corev1.SecretReference `json:"credentialsSecret,omitempty"`

	Location      string `json:"location"`
	VMSize        string `json:"vmSize"`
	Image         Image  `json:"image"`
	OSDisk        OSDisk `json:"osDisk"`
	SSHPublicKey  string `json:"sshPublicKey"`
	SSHPrivateKey string `json:"sshPrivateKey"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

func init()
{ SchemeBuilder.Register(&AzureMachineProviderSpec{}) }
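Appended to the prefix's final line, the cell above completes the registration hook:

// Reassembled from the two cells above: registers the provider spec
// type with the scheme builder at package initialization.
func init() {
	SchemeBuilder.Register(&AzureMachineProviderSpec{})
}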
smoothed_load_optimization.rs
use crate::{algorithms::online::multi_dimensional::lazy_budgeting::smoothed_load_optimization::{ lb, Memory, Options, }, bindings::data_center::online::{ SLOResponse, SLOStepResponse,
DataCenterModelOutputFailure,
    },
    problem::IntegralSmoothedLoadOptimization,
    streaming::online::{self, OfflineResponse},
};
use pyo3::{exceptions::PyAssertionError, prelude::*};

/// Starts the backend in a new thread.
#[pyfunction]
#[allow(clippy::type_complexity)]
fn start(
    py: Python,
    addr: String,
    model: DataCenterModel,
    input: DataCenterOfflineInput,
    w: i32,
    options: Options,
) -> PyResult<SLOResponse<i32, Memory>> {
    py.allow_threads(|| {
        let OfflineResponse {
            xs: (xs, cost),
            int_xs: (int_xs, int_cost),
            m,
            runtime,
        } = online::start(
            addr.parse().unwrap(),
            model,
            &lb,
            options,
            w,
            input,
            None,
        )
        .unwrap();
        Ok(((xs.to_vec(), cost), (int_xs.to_vec(), int_cost), m, runtime))
    })
}

/// Executes the next iteration of the algorithm.
#[pyfunction]
fn next(
    py: Python,
    addr: String,
    input: DataCenterOnlineInput,
) -> PyResult<SLOStepResponse<i32, Memory>> {
    py.allow_threads(|| {
        let ((x, cost), (int_x, int_cost), m, runtime) = online::next::<
            i32,
            IntegralSmoothedLoadOptimization,
            Memory,
            DataCenterOnlineInput,
            (),
            DataCenterModelOutputFailure,
        >(addr.parse().unwrap(), input)
        .map_err(PyAssertionError::new_err)?;
        Ok(((x.to_vec(), cost), (int_x.to_vec(), int_cost), m, runtime))
    })
}

/// Lazy budgeting for smoothed load optimization.
pub fn submodule(_py: Python, m: &PyModule) -> PyResult<()> {
    m.add_function(wrap_pyfunction!(start, m)?)?;
    m.add_function(wrap_pyfunction!(next, m)?)?;
    m.add_class::<Options>()?;
    Ok(())
}
}, model::data_center::{ model::{ DataCenterModel, DataCenterOfflineInput, DataCenterOnlineInput, },
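Splicing the middle between prefix and suffix closes the nested `use` tree; the reassembled declaration, reformatted for readability:

// Reassembled use-declaration (prefix + middle + suffix), reformatted.
use crate::{
    algorithms::online::multi_dimensional::lazy_budgeting::smoothed_load_optimization::{
        lb, Memory, Options,
    },
    bindings::data_center::online::{SLOResponse, SLOStepResponse},
    model::data_center::{
        model::{DataCenterModel, DataCenterOfflineInput, DataCenterOnlineInput},
        DataCenterModelOutputFailure,
    },
    problem::IntegralSmoothedLoadOptimization,
    streaming::online::{self, OfflineResponse},
};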
plus-gen.go
// Copyright 2020 Google LLC. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Code generated file. DO NOT EDIT. // Package plus provides access to the Google+ API. // // For product documentation, see: https://developers.google.com/+/api/ // // Creating a client // // Usage example: // // import "google.golang.org/api/plus/v1" // ... // ctx := context.Background() // plusService, err := plus.NewService(ctx) // // In this example, Google Application Default Credentials are used for authentication. // // For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials. // // Other authentication options // // By default, all available scopes (see "Constants") are used to authenticate. To restrict scopes, use option.WithScopes: // // plusService, err := plus.NewService(ctx, option.WithScopes(plus.UserinfoProfileScope)) // // To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey: // // plusService, err := plus.NewService(ctx, option.WithAPIKey("AIza...")) // // To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource: // // config := &oauth2.Config{...} // // ... // token, err := config.Exchange(ctx, ...) // plusService, err := plus.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token))) // // See https://godoc.org/google.golang.org/api/option/ for details on options. package plus // import "google.golang.org/api/plus/v1" import ( "bytes" "context" "encoding/json" "errors" "fmt" "io" "net/http" "net/url" "strconv" "strings" googleapi "google.golang.org/api/googleapi" gensupport "google.golang.org/api/internal/gensupport" option "google.golang.org/api/option" internaloption "google.golang.org/api/option/internaloption" htransport "google.golang.org/api/transport/http" ) // Always reference these packages, just in case the auto-generated code // below doesn't. var _ = bytes.NewBuffer var _ = strconv.Itoa var _ = fmt.Sprintf var _ = json.NewDecoder var _ = io.Copy var _ = url.Parse var _ = gensupport.MarshalJSON var _ = googleapi.Version var _ = errors.New var _ = strings.Replace var _ = context.Canceled var _ = internaloption.WithDefaultEndpoint const apiId = "plus:v1" const apiName = "plus" const apiVersion = "v1" const basePath = "https://www.googleapis.com/plus/v1/" // OAuth2 scopes used by this API. const ( // View your basic profile info, including your age range and language PlusLoginScope = "https://www.googleapis.com/auth/plus.login" // Associate you with your personal info on Google PlusMeScope = "https://www.googleapis.com/auth/plus.me" // View your email address UserinfoEmailScope = "https://www.googleapis.com/auth/userinfo.email" // See your personal info, including any personal info you've made // publicly available UserinfoProfileScope = "https://www.googleapis.com/auth/userinfo.profile" ) // NewService creates a new Service. func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, error) { scopesOption := option.WithScopes( "https://www.googleapis.com/auth/plus.login", "https://www.googleapis.com/auth/plus.me", "https://www.googleapis.com/auth/userinfo.email", "https://www.googleapis.com/auth/userinfo.profile", ) // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) 
opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err } s, err := New(client) if err != nil { return nil, err } if endpoint != "" { s.BasePath = endpoint } return s, nil } // New creates a new Service. It uses the provided http.Client for requests. // // Deprecated: please use NewService instead. // To provide a custom HTTP client, use option.WithHTTPClient. // If you are using google.golang.org/api/googleapis/transport.APIKey, use option.WithAPIKey with NewService instead. func New(client *http.Client) (*Service, error) { if client == nil { return nil, errors.New("client is nil") } s := &Service{client: client, BasePath: basePath} s.Activities = NewActivitiesService(s) s.Comments = NewCommentsService(s) s.People = NewPeopleService(s) return s, nil } type Service struct { client *http.Client BasePath string // API endpoint base URL UserAgent string // optional additional User-Agent fragment Activities *ActivitiesService Comments *CommentsService People *PeopleService } func (s *Service) userAgent() string { if s.UserAgent == "" { return googleapi.UserAgent } return googleapi.UserAgent + " " + s.UserAgent } func NewActivitiesService(s *Service) *ActivitiesService
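The completion for this row is not shown in the excerpt; by analogy with `NewCommentsService` and `NewPeopleService` in the suffix below, it is presumably the constructor body:

// Presumed completion (an assumption, not part of this excerpt), mirroring
// the sibling service constructors in the suffix:
func NewActivitiesService(s *Service) *ActivitiesService {
	rs := &ActivitiesService{s: s}
	return rs
}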
type ActivitiesService struct { s *Service } func NewCommentsService(s *Service) *CommentsService { rs := &CommentsService{s: s} return rs } type CommentsService struct { s *Service } func NewPeopleService(s *Service) *PeopleService { rs := &PeopleService{s: s} return rs } type PeopleService struct { s *Service } type Acl struct { // Description: Description of the access granted, suitable for display. Description string `json:"description,omitempty"` // Items: The list of access entries. Items []*PlusAclentryResource `json:"items,omitempty"` // Kind: Identifies this resource as a collection of access controls. // Value: "plus#acl". Kind string `json:"kind,omitempty"` // ForceSendFields is a list of field names (e.g. "Description") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Description") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *Acl) MarshalJSON() ([]byte, error) { type NoMethod Acl raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type Activity struct { // Access: Identifies who has access to see this activity. Access *Acl `json:"access,omitempty"` // Actor: The person who performed this activity. Actor *ActivityActor `json:"actor,omitempty"` // Address: Street address where this activity occurred. Address string `json:"address,omitempty"` // Annotation: Additional content added by the person who shared this // activity, applicable only when resharing an activity. Annotation string `json:"annotation,omitempty"` // CrosspostSource: If this activity is a crosspost from another system, // this property specifies the ID of the original activity. CrosspostSource string `json:"crosspostSource,omitempty"` // Etag: ETag of this response for caching purposes. Etag string `json:"etag,omitempty"` // Geocode: Latitude and longitude where this activity occurred. Format // is latitude followed by longitude, space separated. Geocode string `json:"geocode,omitempty"` // Id: The ID of this activity. Id string `json:"id,omitempty"` // Kind: Identifies this resource as an activity. Value: // "plus#activity". Kind string `json:"kind,omitempty"` // Location: The location where this activity occurred. Location *Place `json:"location,omitempty"` // Object: The object of this activity. Object *ActivityObject `json:"object,omitempty"` // PlaceId: ID of the place where this activity occurred. PlaceId string `json:"placeId,omitempty"` // PlaceName: Name of the place where this activity occurred. PlaceName string `json:"placeName,omitempty"` // Provider: The service provider that initially published this // activity. Provider *ActivityProvider `json:"provider,omitempty"` // Published: The time at which this activity was initially published. // Formatted as an RFC 3339 timestamp. 
Published string `json:"published,omitempty"` // Radius: Radius, in meters, of the region where this activity // occurred, centered at the latitude and longitude identified in // geocode. Radius string `json:"radius,omitempty"` // Title: Title of this activity. Title string `json:"title,omitempty"` // Updated: The time at which this activity was last updated. Formatted // as an RFC 3339 timestamp. Updated string `json:"updated,omitempty"` // Url: The link to this activity. Url string `json:"url,omitempty"` // Verb: This activity's verb, which indicates the action that was // performed. Possible values include, but are not limited to, the // following values: // - "post" - Publish content to the stream. // - "share" - Reshare an activity. Verb string `json:"verb,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "Access") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Access") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *Activity) MarshalJSON() ([]byte, error) { type NoMethod Activity raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ActivityActor: The person who performed this activity. type ActivityActor struct { // ClientSpecificActorInfo: Actor info specific to particular clients. ClientSpecificActorInfo *ActivityActorClientSpecificActorInfo `json:"clientSpecificActorInfo,omitempty"` // DisplayName: The name of the actor, suitable for display. DisplayName string `json:"displayName,omitempty"` // Id: The ID of the actor's Person resource. Id string `json:"id,omitempty"` // Image: The image representation of the actor. Image *ActivityActorImage `json:"image,omitempty"` // Name: An object representation of the individual components of name. Name *ActivityActorName `json:"name,omitempty"` // Url: The link to the actor's Google profile. Url string `json:"url,omitempty"` // Verification: Verification status of actor. Verification *ActivityActorVerification `json:"verification,omitempty"` // ForceSendFields is a list of field names (e.g. // "ClientSpecificActorInfo") to unconditionally include in API // requests. By default, fields with empty values are omitted from API // requests. However, any non-pointer, non-interface field appearing in // ForceSendFields will be sent to the server regardless of whether the // field is empty or not. This may be used to include empty fields in // Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "ClientSpecificActorInfo") // to include in API requests with the JSON null value. By default, // fields with empty values are omitted from API requests. 
However, any // field with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } func (s *ActivityActor) MarshalJSON() ([]byte, error) { type NoMethod ActivityActor raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ActivityActorClientSpecificActorInfo: Actor info specific to // particular clients. type ActivityActorClientSpecificActorInfo struct { // YoutubeActorInfo: Actor info specific to YouTube clients. YoutubeActorInfo *ActivityActorClientSpecificActorInfoYoutubeActorInfo `json:"youtubeActorInfo,omitempty"` // ForceSendFields is a list of field names (e.g. "YoutubeActorInfo") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "YoutubeActorInfo") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } func (s *ActivityActorClientSpecificActorInfo) MarshalJSON() ([]byte, error) { type NoMethod ActivityActorClientSpecificActorInfo raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ActivityActorClientSpecificActorInfoYoutubeActorInfo: Actor info // specific to YouTube clients. type ActivityActorClientSpecificActorInfoYoutubeActorInfo struct { // ChannelId: ID of the YouTube channel owned by the Actor. ChannelId string `json:"channelId,omitempty"` // ForceSendFields is a list of field names (e.g. "ChannelId") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "ChannelId") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ActivityActorClientSpecificActorInfoYoutubeActorInfo) MarshalJSON() ([]byte, error) { type NoMethod ActivityActorClientSpecificActorInfoYoutubeActorInfo raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ActivityActorImage: The image representation of the actor. type ActivityActorImage struct { // Url: The URL of the actor's profile photo. To resize the image and // crop it to a square, append the query string ?sz=x, where x is the // dimension in pixels of each side. 
Url string `json:"url,omitempty"` // ForceSendFields is a list of field names (e.g. "Url") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Url") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ActivityActorImage) MarshalJSON() ([]byte, error) { type NoMethod ActivityActorImage raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ActivityActorName: An object representation of the individual // components of name. type ActivityActorName struct { // FamilyName: The family name ("last name") of the actor. FamilyName string `json:"familyName,omitempty"` // GivenName: The given name ("first name") of the actor. GivenName string `json:"givenName,omitempty"` // ForceSendFields is a list of field names (e.g. "FamilyName") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "FamilyName") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ActivityActorName) MarshalJSON() ([]byte, error) { type NoMethod ActivityActorName raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ActivityActorVerification: Verification status of actor. type ActivityActorVerification struct { // AdHocVerified: Verification for one-time or manual processes. AdHocVerified string `json:"adHocVerified,omitempty"` // ForceSendFields is a list of field names (e.g. "AdHocVerified") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "AdHocVerified") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. 
// This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ActivityActorVerification) MarshalJSON() ([]byte, error) { type NoMethod ActivityActorVerification raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ActivityObject: The object of this activity. type ActivityObject struct { // Actor: If this activity's object is itself another activity, such as // when a person reshares an activity, this property specifies the // original activity's actor. Actor *ActivityObjectActor `json:"actor,omitempty"` // Attachments: The media objects attached to this activity. Attachments []*ActivityObjectAttachments `json:"attachments,omitempty"` // Content: The HTML-formatted content, which is suitable for display. Content string `json:"content,omitempty"` // Id: The ID of the object. When resharing an activity, this is the ID // of the activity that is being reshared. Id string `json:"id,omitempty"` // ObjectType: The type of the object. Possible values include, but are // not limited to, the following values: // - "note" - Textual content. // - "activity" - A Google+ activity. ObjectType string `json:"objectType,omitempty"` // OriginalContent: The content (text) as provided by the author, which // is stored without any HTML formatting. When creating or updating an // activity, this value must be supplied as plain text in the request. OriginalContent string `json:"originalContent,omitempty"` // Plusoners: People who +1'd this activity. Plusoners *ActivityObjectPlusoners `json:"plusoners,omitempty"` // Replies: Comments in reply to this activity. Replies *ActivityObjectReplies `json:"replies,omitempty"` // Resharers: People who reshared this activity. Resharers *ActivityObjectResharers `json:"resharers,omitempty"` // Url: The URL that points to the linked resource. Url string `json:"url,omitempty"` // ForceSendFields is a list of field names (e.g. "Actor") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Actor") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ActivityObject) MarshalJSON() ([]byte, error) { type NoMethod ActivityObject raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ActivityObjectActor: If this activity's object is itself another // activity, such as when a person reshares an activity, this property // specifies the original activity's actor. type ActivityObjectActor struct { // ClientSpecificActorInfo: Actor info specific to particular clients. ClientSpecificActorInfo *ActivityObjectActorClientSpecificActorInfo `json:"clientSpecificActorInfo,omitempty"` // DisplayName: The original actor's name, which is suitable for // display. DisplayName string `json:"displayName,omitempty"` // Id: ID of the original actor. 
Id string `json:"id,omitempty"` // Image: The image representation of the original actor. Image *ActivityObjectActorImage `json:"image,omitempty"` // Url: A link to the original actor's Google profile. Url string `json:"url,omitempty"` // Verification: Verification status of actor. Verification *ActivityObjectActorVerification `json:"verification,omitempty"` // ForceSendFields is a list of field names (e.g. // "ClientSpecificActorInfo") to unconditionally include in API // requests. By default, fields with empty values are omitted from API // requests. However, any non-pointer, non-interface field appearing in // ForceSendFields will be sent to the server regardless of whether the // field is empty or not. This may be used to include empty fields in // Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "ClientSpecificActorInfo") // to include in API requests with the JSON null value. By default, // fields with empty values are omitted from API requests. However, any // field with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } func (s *ActivityObjectActor) MarshalJSON() ([]byte, error) { type NoMethod ActivityObjectActor raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ActivityObjectActorClientSpecificActorInfo: Actor info specific to // particular clients. type ActivityObjectActorClientSpecificActorInfo struct { // YoutubeActorInfo: Actor info specific to YouTube clients. YoutubeActorInfo *ActivityObjectActorClientSpecificActorInfoYoutubeActorInfo `json:"youtubeActorInfo,omitempty"` // ForceSendFields is a list of field names (e.g. "YoutubeActorInfo") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "YoutubeActorInfo") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } func (s *ActivityObjectActorClientSpecificActorInfo) MarshalJSON() ([]byte, error) { type NoMethod ActivityObjectActorClientSpecificActorInfo raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ActivityObjectActorClientSpecificActorInfoYoutubeActorInfo: Actor // info specific to YouTube clients. type ActivityObjectActorClientSpecificActorInfoYoutubeActorInfo struct { // ChannelId: ID of the YouTube channel owned by the Actor. ChannelId string `json:"channelId,omitempty"` // ForceSendFields is a list of field names (e.g. "ChannelId") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. 
This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "ChannelId") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ActivityObjectActorClientSpecificActorInfoYoutubeActorInfo) MarshalJSON() ([]byte, error) { type NoMethod ActivityObjectActorClientSpecificActorInfoYoutubeActorInfo raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ActivityObjectActorImage: The image representation of the original // actor. type ActivityObjectActorImage struct { // Url: A URL that points to a thumbnail photo of the original actor. Url string `json:"url,omitempty"` // ForceSendFields is a list of field names (e.g. "Url") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Url") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ActivityObjectActorImage) MarshalJSON() ([]byte, error) { type NoMethod ActivityObjectActorImage raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ActivityObjectActorVerification: Verification status of actor. type ActivityObjectActorVerification struct { // AdHocVerified: Verification for one-time or manual processes. AdHocVerified string `json:"adHocVerified,omitempty"` // ForceSendFields is a list of field names (e.g. "AdHocVerified") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "AdHocVerified") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` } func (s *ActivityObjectActorVerification) MarshalJSON() ([]byte, error) { type NoMethod ActivityObjectActorVerification raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type ActivityObjectAttachments struct { // Content: If the attachment is an article, this property contains a // snippet of text from the article. It can also include descriptions // for other types. Content string `json:"content,omitempty"` // DisplayName: The title of the attachment, such as a photo caption or // an article title. DisplayName string `json:"displayName,omitempty"` // Embed: If the attachment is a video, the embeddable link. Embed *ActivityObjectAttachmentsEmbed `json:"embed,omitempty"` // FullImage: The full image URL for photo attachments. FullImage *ActivityObjectAttachmentsFullImage `json:"fullImage,omitempty"` // Id: The ID of the attachment. Id string `json:"id,omitempty"` // Image: The preview image for photos or videos. Image *ActivityObjectAttachmentsImage `json:"image,omitempty"` // ObjectType: The type of media object. Possible values include, but // are not limited to, the following values: // - "photo" - A photo. // - "album" - A photo album. // - "video" - A video. // - "article" - An article, specified by a link. ObjectType string `json:"objectType,omitempty"` // Thumbnails: If the attachment is an album, this property is a list of // potential additional thumbnails from the album. Thumbnails []*ActivityObjectAttachmentsThumbnails `json:"thumbnails,omitempty"` // Url: The link to the attachment, which should be of type text/html. Url string `json:"url,omitempty"` // ForceSendFields is a list of field names (e.g. "Content") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Content") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ActivityObjectAttachments) MarshalJSON() ([]byte, error) { type NoMethod ActivityObjectAttachments raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ActivityObjectAttachmentsEmbed: If the attachment is a video, the // embeddable link. type ActivityObjectAttachmentsEmbed struct { // Type: Media type of the link. Type string `json:"type,omitempty"` // Url: URL of the link. Url string `json:"url,omitempty"` // ForceSendFields is a list of field names (e.g. "Type") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Type") to include in API // requests with the JSON null value. 
By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ActivityObjectAttachmentsEmbed) MarshalJSON() ([]byte, error) { type NoMethod ActivityObjectAttachmentsEmbed raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ActivityObjectAttachmentsFullImage: The full image URL for photo // attachments. type ActivityObjectAttachmentsFullImage struct { // Height: The height, in pixels, of the linked resource. Height int64 `json:"height,omitempty"` // Type: Media type of the link. Type string `json:"type,omitempty"` // Url: URL of the image. Url string `json:"url,omitempty"` // Width: The width, in pixels, of the linked resource. Width int64 `json:"width,omitempty"` // ForceSendFields is a list of field names (e.g. "Height") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Height") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ActivityObjectAttachmentsFullImage) MarshalJSON() ([]byte, error) { type NoMethod ActivityObjectAttachmentsFullImage raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ActivityObjectAttachmentsImage: The preview image for photos or // videos. type ActivityObjectAttachmentsImage struct { // Height: The height, in pixels, of the linked resource. Height int64 `json:"height,omitempty"` // Type: Media type of the link. Type string `json:"type,omitempty"` // Url: Image URL. Url string `json:"url,omitempty"` // Width: The width, in pixels, of the linked resource. Width int64 `json:"width,omitempty"` // ForceSendFields is a list of field names (e.g. "Height") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Height") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. 
NullFields []string `json:"-"` } func (s *ActivityObjectAttachmentsImage) MarshalJSON() ([]byte, error) { type NoMethod ActivityObjectAttachmentsImage raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type ActivityObjectAttachmentsThumbnails struct { // Description: Potential name of the thumbnail. Description string `json:"description,omitempty"` // Image: Image resource. Image *ActivityObjectAttachmentsThumbnailsImage `json:"image,omitempty"` // Url: URL of the webpage containing the image. Url string `json:"url,omitempty"` // ForceSendFields is a list of field names (e.g. "Description") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Description") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ActivityObjectAttachmentsThumbnails) MarshalJSON() ([]byte, error) { type NoMethod ActivityObjectAttachmentsThumbnails raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ActivityObjectAttachmentsThumbnailsImage: Image resource. type ActivityObjectAttachmentsThumbnailsImage struct { // Height: The height, in pixels, of the linked resource. Height int64 `json:"height,omitempty"` // Type: Media type of the link. Type string `json:"type,omitempty"` // Url: Image url. Url string `json:"url,omitempty"` // Width: The width, in pixels, of the linked resource. Width int64 `json:"width,omitempty"` // ForceSendFields is a list of field names (e.g. "Height") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Height") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ActivityObjectAttachmentsThumbnailsImage) MarshalJSON() ([]byte, error) { type NoMethod ActivityObjectAttachmentsThumbnailsImage raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ActivityObjectPlusoners: People who +1'd this activity. type ActivityObjectPlusoners struct { // SelfLink: The URL for the collection of people who +1'd this // activity. SelfLink string `json:"selfLink,omitempty"` // TotalItems: Total number of people who +1'd this activity. 
TotalItems int64 `json:"totalItems,omitempty"` // ForceSendFields is a list of field names (e.g. "SelfLink") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "SelfLink") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ActivityObjectPlusoners) MarshalJSON() ([]byte, error) { type NoMethod ActivityObjectPlusoners raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ActivityObjectReplies: Comments in reply to this activity. type ActivityObjectReplies struct { // SelfLink: The URL for the collection of comments in reply to this // activity. SelfLink string `json:"selfLink,omitempty"` // TotalItems: Total number of comments on this activity. TotalItems int64 `json:"totalItems,omitempty"` // ForceSendFields is a list of field names (e.g. "SelfLink") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "SelfLink") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ActivityObjectReplies) MarshalJSON() ([]byte, error) { type NoMethod ActivityObjectReplies raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ActivityObjectResharers: People who reshared this activity. type ActivityObjectResharers struct { // SelfLink: The URL for the collection of resharers. SelfLink string `json:"selfLink,omitempty"` // TotalItems: Total number of people who reshared this activity. TotalItems int64 `json:"totalItems,omitempty"` // ForceSendFields is a list of field names (e.g. "SelfLink") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "SelfLink") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ActivityObjectResharers) MarshalJSON() ([]byte, error) { type NoMethod ActivityObjectResharers raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ActivityProvider: The service provider that initially published this // activity. type ActivityProvider struct { // Title: Name of the service provider. Title string `json:"title,omitempty"` // ForceSendFields is a list of field names (e.g. "Title") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Title") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ActivityProvider) MarshalJSON() ([]byte, error) { type NoMethod ActivityProvider raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type ActivityFeed struct { // Etag: ETag of this response for caching purposes. Etag string `json:"etag,omitempty"` // Id: The ID of this collection of activities. Deprecated. Id string `json:"id,omitempty"` // Items: The activities in this page of results. Items []*Activity `json:"items,omitempty"` // Kind: Identifies this resource as a collection of activities. Value: // "plus#activityFeed". Kind string `json:"kind,omitempty"` // NextLink: Link to the next page of activities. NextLink string `json:"nextLink,omitempty"` // NextPageToken: The continuation token, which is used to page through // large result sets. Provide this value in a subsequent request to // return the next page of results. NextPageToken string `json:"nextPageToken,omitempty"` // SelfLink: Link to this activity resource. SelfLink string `json:"selfLink,omitempty"` // Title: The title of this collection of activities, which is a // truncated portion of the content. Title string `json:"title,omitempty"` // Updated: The time at which this collection of activities was last // updated. Formatted as an RFC 3339 timestamp. Updated string `json:"updated,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "Etag") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Etag") to include in API // requests with the JSON null value. 
By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ActivityFeed) MarshalJSON() ([]byte, error) { type NoMethod ActivityFeed raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type Comment struct { // Actor: The person who posted this comment. Actor *CommentActor `json:"actor,omitempty"` // Etag: ETag of this response for caching purposes. Etag string `json:"etag,omitempty"` // Id: The ID of this comment. Id string `json:"id,omitempty"` // InReplyTo: The activity this comment replied to. InReplyTo []*CommentInReplyTo `json:"inReplyTo,omitempty"` // Kind: Identifies this resource as a comment. Value: "plus#comment". Kind string `json:"kind,omitempty"` // Object: The object of this comment. Object *CommentObject `json:"object,omitempty"` // Plusoners: People who +1'd this comment. Plusoners *CommentPlusoners `json:"plusoners,omitempty"` // Published: The time at which this comment was initially published. // Formatted as an RFC 3339 timestamp. Published string `json:"published,omitempty"` // SelfLink: Link to this comment resource. SelfLink string `json:"selfLink,omitempty"` // Updated: The time at which this comment was last updated. Formatted // as an RFC 3339 timestamp. Updated string `json:"updated,omitempty"` // Verb: This comment's verb, indicating what action was performed. // Possible values are: // - "post" - Publish content to the stream. Verb string `json:"verb,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "Actor") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Actor") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *Comment) MarshalJSON() ([]byte, error) { type NoMethod Comment raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // CommentActor: The person who posted this comment. type CommentActor struct { // ClientSpecificActorInfo: Actor info specific to particular clients. ClientSpecificActorInfo *CommentActorClientSpecificActorInfo `json:"clientSpecificActorInfo,omitempty"` // DisplayName: The name of this actor, suitable for display. DisplayName string `json:"displayName,omitempty"` // Id: The ID of the actor. Id string `json:"id,omitempty"` // Image: The image representation of this actor. Image *CommentActorImage `json:"image,omitempty"` // Url: A link to the Person resource for this actor. Url string `json:"url,omitempty"` // Verification: Verification status of actor. 
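// The ForceSendFields/NullFields convention documented on the structs
// above applies uniformly to every type in this package. As a minimal,
// illustrative sketch (the field choices are arbitrary), forcing an
// empty Verb and nulling SelfLink on the Comment type defined above:
//
//	c := &Comment{
//		Verb:            "", // empty, so omitted unless forced
//		ForceSendFields: []string{"Verb"},
//		NullFields:      []string{"SelfLink"},
//	}
//	b, err := c.MarshalJSON()
//	if err != nil {
//		// handle error
//	}
//	fmt.Println(string(b)) // {"selfLink":null,"verb":""}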
Verification *CommentActorVerification `json:"verification,omitempty"` // ForceSendFields is a list of field names (e.g. // "ClientSpecificActorInfo") to unconditionally include in API // requests. By default, fields with empty values are omitted from API // requests. However, any non-pointer, non-interface field appearing in // ForceSendFields will be sent to the server regardless of whether the // field is empty or not. This may be used to include empty fields in // Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "ClientSpecificActorInfo") // to include in API requests with the JSON null value. By default, // fields with empty values are omitted from API requests. However, any // field with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } func (s *CommentActor) MarshalJSON() ([]byte, error) { type NoMethod CommentActor raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // CommentActorClientSpecificActorInfo: Actor info specific to // particular clients. type CommentActorClientSpecificActorInfo struct { // YoutubeActorInfo: Actor info specific to YouTube clients. YoutubeActorInfo *CommentActorClientSpecificActorInfoYoutubeActorInfo `json:"youtubeActorInfo,omitempty"` // ForceSendFields is a list of field names (e.g. "YoutubeActorInfo") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "YoutubeActorInfo") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } func (s *CommentActorClientSpecificActorInfo) MarshalJSON() ([]byte, error) { type NoMethod CommentActorClientSpecificActorInfo raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // CommentActorClientSpecificActorInfoYoutubeActorInfo: Actor info // specific to YouTube clients. type CommentActorClientSpecificActorInfoYoutubeActorInfo struct { // ChannelId: ID of the YouTube channel owned by the Actor. ChannelId string `json:"channelId,omitempty"` // ForceSendFields is a list of field names (e.g. "ChannelId") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "ChannelId") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *CommentActorClientSpecificActorInfoYoutubeActorInfo) MarshalJSON() ([]byte, error) { type NoMethod CommentActorClientSpecificActorInfoYoutubeActorInfo raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // CommentActorImage: The image representation of this actor. type CommentActorImage struct { // Url: The URL of the actor's profile photo. To resize the image and // crop it to a square, append the query string ?sz=x, where x is the // dimension in pixels of each side. Url string `json:"url,omitempty"` // ForceSendFields is a list of field names (e.g. "Url") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Url") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *CommentActorImage) MarshalJSON() ([]byte, error) { type NoMethod CommentActorImage raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // CommentActorVerification: Verification status of actor. type CommentActorVerification struct { // AdHocVerified: Verification for one-time or manual processes. AdHocVerified string `json:"adHocVerified,omitempty"` // ForceSendFields is a list of field names (e.g. "AdHocVerified") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "AdHocVerified") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *CommentActorVerification) MarshalJSON() ([]byte, error) { type NoMethod CommentActorVerification raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type CommentInReplyTo struct { // Id: The ID of the activity. Id string `json:"id,omitempty"` // Url: The URL of the activity. Url string `json:"url,omitempty"` // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Id") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *CommentInReplyTo) MarshalJSON() ([]byte, error) { type NoMethod CommentInReplyTo raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // CommentObject: The object of this comment. type CommentObject struct { // Content: The HTML-formatted content, suitable for display. Content string `json:"content,omitempty"` // ObjectType: The object type of this comment. Possible values are: // - "comment" - A comment in reply to an activity. ObjectType string `json:"objectType,omitempty"` // OriginalContent: The content (text) as provided by the author, stored // without any HTML formatting. When creating or updating a comment, // this value must be supplied as plain text in the request. OriginalContent string `json:"originalContent,omitempty"` // ForceSendFields is a list of field names (e.g. "Content") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Content") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *CommentObject) MarshalJSON() ([]byte, error) { type NoMethod CommentObject raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // CommentPlusoners: People who +1'd this comment. type CommentPlusoners struct { // TotalItems: Total number of people who +1'd this comment. TotalItems int64 `json:"totalItems,omitempty"` // ForceSendFields is a list of field names (e.g. "TotalItems") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "TotalItems") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. 
// This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *CommentPlusoners) MarshalJSON() ([]byte, error) { type NoMethod CommentPlusoners raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type CommentFeed struct { // Etag: ETag of this response for caching purposes. Etag string `json:"etag,omitempty"` // Id: The ID of this collection of comments. Id string `json:"id,omitempty"` // Items: The comments in this page of results. Items []*Comment `json:"items,omitempty"` // Kind: Identifies this resource as a collection of comments. Value: // "plus#commentFeed". Kind string `json:"kind,omitempty"` // NextLink: Link to the next page of activities. NextLink string `json:"nextLink,omitempty"` // NextPageToken: The continuation token, which is used to page through // large result sets. Provide this value in a subsequent request to // return the next page of results. NextPageToken string `json:"nextPageToken,omitempty"` // Title: The title of this collection of comments. Title string `json:"title,omitempty"` // Updated: The time at which this collection of comments was last // updated. Formatted as an RFC 3339 timestamp. Updated string `json:"updated,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "Etag") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Etag") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *CommentFeed) MarshalJSON() ([]byte, error) { type NoMethod CommentFeed raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type PeopleFeed struct { // Etag: ETag of this response for caching purposes. Etag string `json:"etag,omitempty"` // Items: The people in this page of results. Each item includes the id, // displayName, image, and url for the person. To retrieve additional // profile data, see the people.get method. Items []*Person `json:"items,omitempty"` // Kind: Identifies this resource as a collection of people. Value: // "plus#peopleFeed". Kind string `json:"kind,omitempty"` // NextPageToken: The continuation token, which is used to page through // large result sets. Provide this value in a subsequent request to // return the next page of results. NextPageToken string `json:"nextPageToken,omitempty"` // SelfLink: Link to this resource. SelfLink string `json:"selfLink,omitempty"` // Title: The title of this collection of people. Title string `json:"title,omitempty"` // TotalItems: The total number of people available in this list. The // number of people in a response might be smaller due to paging. This // might not be set for all collections. 
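// For example, a single page of results typically holds fewer entries
// than the collection total, so judge completeness by NextPageToken
// rather than by len(Items) alone (a minimal sketch, given a previously
// fetched *PeopleFeed named feed):
//
//	more := feed.NextPageToken != "" ||
//		(feed.TotalItems > 0 && int64(len(feed.Items)) < feed.TotalItems)
//	if more {
//		// pass feed.NextPageToken to the next request
//	}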
TotalItems int64 `json:"totalItems,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "Etag") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Etag") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *PeopleFeed) MarshalJSON() ([]byte, error) { type NoMethod PeopleFeed raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type Person struct { // AboutMe: A short biography for this person. AboutMe string `json:"aboutMe,omitempty"` // AgeRange: The age range of the person. Valid ranges are 17 or // younger, 18 to 20, and 21 or older. Age is determined from the user's // birthday using Western age reckoning. AgeRange *PersonAgeRange `json:"ageRange,omitempty"` // Birthday: The person's date of birth, represented as YYYY-MM-DD. Birthday string `json:"birthday,omitempty"` // BraggingRights: The "bragging rights" line of this person. BraggingRights string `json:"braggingRights,omitempty"` // CircledByCount: For followers who are visible, the number of people // who have added this person or page to a circle. CircledByCount int64 `json:"circledByCount,omitempty"` // Cover: The cover photo content. Cover *PersonCover `json:"cover,omitempty"` // CurrentLocation: (this field is not currently used) CurrentLocation string `json:"currentLocation,omitempty"` // DisplayName: The name of this person, which is suitable for display. DisplayName string `json:"displayName,omitempty"` // Domain: The hosted domain name for the user's Google Apps account. // For instance, example.com. The plus.profile.emails.read or email // scope is needed to get this domain name. Domain string `json:"domain,omitempty"` // Emails: A list of email addresses that this person has, including // their Google account email address, and the public verified email // addresses on their Google+ profile. The plus.profile.emails.read // scope is needed to retrieve these email addresses, or the email scope // can be used to retrieve just the Google account email address. Emails []*PersonEmails `json:"emails,omitempty"` // Etag: ETag of this response for caching purposes. Etag string `json:"etag,omitempty"` // Gender: The person's gender. Possible values include, but are not // limited to, the following values: // - "male" - Male gender. // - "female" - Female gender. // - "other" - Other. Gender string `json:"gender,omitempty"` // Id: The ID of this person. Id string `json:"id,omitempty"` // Image: The representation of the person's profile photo. Image *PersonImage `json:"image,omitempty"` // IsPlusUser: Whether this user has signed up for Google+. IsPlusUser bool `json:"isPlusUser,omitempty"` // Kind: Identifies this resource as a person. Value: "plus#person". 
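// As noted on PersonImage below, the profile photo URL in the Image
// field accepts an sz query parameter that requests a square crop (a
// minimal sketch, given a previously fetched *Person named p):
//
//	if p.Image != nil && p.Image.Url != "" {
//		fmt.Println(p.Image.Url + "?sz=100") // 100px-per-side crop
//	}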
Kind string `json:"kind,omitempty"` // Language: The user's preferred language for rendering. Language string `json:"language,omitempty"` // Name: An object representation of the individual components of a // person's name. Name *PersonName `json:"name,omitempty"` // Nickname: The nickname of this person. Nickname string `json:"nickname,omitempty"` // ObjectType: Type of person within Google+. Possible values include, // but are not limited to, the following values: // - "person" - represents an actual person. // - "page" - represents a page. ObjectType string `json:"objectType,omitempty"` // Occupation: The occupation of this person. Occupation string `json:"occupation,omitempty"` // Organizations: A list of current or past organizations with which // this person is associated. Organizations []*PersonOrganizations `json:"organizations,omitempty"` // PlacesLived: A list of places where this person has lived. PlacesLived []*PersonPlacesLived `json:"placesLived,omitempty"` // PlusOneCount: If a Google+ Page, the number of people who have +1'd // this page. PlusOneCount int64 `json:"plusOneCount,omitempty"` // RelationshipStatus: The person's relationship status. Possible values // include, but are not limited to, the following values: // - "single" - Person is single. // - "in_a_relationship" - Person is in a relationship. // - "engaged" - Person is engaged. // - "married" - Person is married. // - "its_complicated" - The relationship is complicated. // - "open_relationship" - Person is in an open relationship. // - "widowed" - Person is widowed. // - "in_domestic_partnership" - Person is in a domestic partnership. // - "in_civil_union" - Person is in a civil union. RelationshipStatus string `json:"relationshipStatus,omitempty"` // Skills: The person's skills. Skills string `json:"skills,omitempty"` // Tagline: The brief description (tagline) of this person. Tagline string `json:"tagline,omitempty"` // Url: The URL of this person's profile. Url string `json:"url,omitempty"` // Urls: A list of URLs for this person. Urls []*PersonUrls `json:"urls,omitempty"` // Verified: Whether the person or Google+ Page has been verified. Verified bool `json:"verified,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "AboutMe") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "AboutMe") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *Person) MarshalJSON() ([]byte, error) { type NoMethod Person raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // PersonAgeRange: The age range of the person. Valid ranges are 17 or // younger, 18 to 20, and 21 or older. Age is determined from the user's // birthday using Western age reckoning. 
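// Both bounds are declared with omitempty, so an absent bound decodes
// as zero; callers should branch on that before interpreting the range
// (a minimal sketch, given a previously fetched *Person named p):
//
//	switch {
//	case p.AgeRange == nil:
//		fmt.Println("no age range available")
//	case p.AgeRange.Max == 0:
//		fmt.Printf("%d or older\n", p.AgeRange.Min)
//	default:
//		fmt.Printf("between %d and %d\n", p.AgeRange.Min, p.AgeRange.Max)
//	}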
type PersonAgeRange struct { // Max: The age range's upper bound, if any. Possible values include, // but are not limited to, the following: // - "17" - for age 17 // - "20" - for age 20 Max int64 `json:"max,omitempty"` // Min: The age range's lower bound, if any. Possible values include, // but are not limited to, the following: // - "21" - for age 21 // - "18" - for age 18 Min int64 `json:"min,omitempty"` // ForceSendFields is a list of field names (e.g. "Max") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Max") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *PersonAgeRange) MarshalJSON() ([]byte, error) { type NoMethod PersonAgeRange raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // PersonCover: The cover photo content. type PersonCover struct { // CoverInfo: Extra information about the cover photo. CoverInfo *PersonCoverCoverInfo `json:"coverInfo,omitempty"` // CoverPhoto: The person's primary cover image. CoverPhoto *PersonCoverCoverPhoto `json:"coverPhoto,omitempty"` // Layout: The layout of the cover art. Possible values include, but are // not limited to, the following values: // - "banner" - One large image banner. Layout string `json:"layout,omitempty"` // ForceSendFields is a list of field names (e.g. "CoverInfo") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "CoverInfo") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *PersonCover) MarshalJSON() ([]byte, error) { type NoMethod PersonCover raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // PersonCoverCoverInfo: Extra information about the cover photo. type PersonCoverCoverInfo struct { // LeftImageOffset: The difference between the left position of the // cover image and the actual displayed cover image. Only valid for // banner layout. LeftImageOffset int64 `json:"leftImageOffset,omitempty"` // TopImageOffset: The difference between the top position of the cover // image and the actual displayed cover image. Only valid for banner // layout. 
TopImageOffset int64 `json:"topImageOffset,omitempty"` // ForceSendFields is a list of field names (e.g. "LeftImageOffset") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "LeftImageOffset") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } func (s *PersonCoverCoverInfo) MarshalJSON() ([]byte, error) { type NoMethod PersonCoverCoverInfo raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // PersonCoverCoverPhoto: The person's primary cover image. type PersonCoverCoverPhoto struct { // Height: The height of the image. Height int64 `json:"height,omitempty"` // Url: The URL of the image. Url string `json:"url,omitempty"` // Width: The width of the image. Width int64 `json:"width,omitempty"` // ForceSendFields is a list of field names (e.g. "Height") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Height") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *PersonCoverCoverPhoto) MarshalJSON() ([]byte, error) { type NoMethod PersonCoverCoverPhoto raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type PersonEmails struct { // Type: The type of address. Possible values include, but are not // limited to, the following values: // - "account" - Google account email address. // - "home" - Home email address. // - "work" - Work email address. // - "other" - Other. Type string `json:"type,omitempty"` // Value: The email address. Value string `json:"value,omitempty"` // ForceSendFields is a list of field names (e.g. "Type") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Type") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *PersonEmails) MarshalJSON() ([]byte, error) { type NoMethod PersonEmails raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // PersonImage: The representation of the person's profile photo. type PersonImage struct { // IsDefault: Whether the person's profile photo is the default one IsDefault bool `json:"isDefault,omitempty"` // Url: The URL of the person's profile photo. To resize the image and // crop it to a square, append the query string ?sz=x, where x is the // dimension in pixels of each side. Url string `json:"url,omitempty"` // ForceSendFields is a list of field names (e.g. "IsDefault") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "IsDefault") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *PersonImage) MarshalJSON() ([]byte, error) { type NoMethod PersonImage raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // PersonName: An object representation of the individual components of // a person's name. type PersonName struct { // FamilyName: The family name (last name) of this person. FamilyName string `json:"familyName,omitempty"` // Formatted: The full name of this person, including middle names, // suffixes, etc. Formatted string `json:"formatted,omitempty"` // GivenName: The given name (first name) of this person. GivenName string `json:"givenName,omitempty"` // HonorificPrefix: The honorific prefixes (such as "Dr." or "Mrs.") for // this person. HonorificPrefix string `json:"honorificPrefix,omitempty"` // HonorificSuffix: The honorific suffixes (such as "Jr.") for this // person. HonorificSuffix string `json:"honorificSuffix,omitempty"` // MiddleName: The middle name of this person. MiddleName string `json:"middleName,omitempty"` // ForceSendFields is a list of field names (e.g. "FamilyName") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "FamilyName") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. 
// This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *PersonName) MarshalJSON() ([]byte, error) { type NoMethod PersonName raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type PersonOrganizations struct { // Department: The department within the organization. Deprecated. Department string `json:"department,omitempty"` // Description: A short description of the person's role in this // organization. Deprecated. Description string `json:"description,omitempty"` // EndDate: The date that the person left this organization. EndDate string `json:"endDate,omitempty"` // Location: The location of this organization. Deprecated. Location string `json:"location,omitempty"` // Name: The name of the organization. Name string `json:"name,omitempty"` // Primary: If "true", indicates this organization is the person's // primary one, which is typically interpreted as the current one. Primary bool `json:"primary,omitempty"` // StartDate: The date that the person joined this organization. StartDate string `json:"startDate,omitempty"` // Title: The person's job title or role within the organization. Title string `json:"title,omitempty"` // Type: The type of organization. Possible values include, but are not // limited to, the following values: // - "work" - Work. // - "school" - School. Type string `json:"type,omitempty"` // ForceSendFields is a list of field names (e.g. "Department") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Department") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *PersonOrganizations) MarshalJSON() ([]byte, error) { type NoMethod PersonOrganizations raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type PersonPlacesLived struct { // Primary: If "true", this place of residence is this person's primary // residence. Primary bool `json:"primary,omitempty"` // Value: A place where this person has lived. For example: "Seattle, // WA", "Near Toronto". Value string `json:"value,omitempty"` // ForceSendFields is a list of field names (e.g. "Primary") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Primary") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. 
It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *PersonPlacesLived) MarshalJSON() ([]byte, error) { type NoMethod PersonPlacesLived raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type PersonUrls struct { // Label: The label of the URL. Label string `json:"label,omitempty"` // Type: The type of URL. Possible values include, but are not limited // to, the following values: // - "otherProfile" - URL for another profile. // - "contributor" - URL to a site for which this person is a // contributor. // - "website" - URL for this Google+ Page's primary website. // - "other" - Other URL. Type string `json:"type,omitempty"` // Value: The URL value. Value string `json:"value,omitempty"` // ForceSendFields is a list of field names (e.g. "Label") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Label") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *PersonUrls) MarshalJSON() ([]byte, error) { type NoMethod PersonUrls raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } type Place struct { // Address: The physical address of the place. Address *PlaceAddress `json:"address,omitempty"` // DisplayName: The display name of the place. DisplayName string `json:"displayName,omitempty"` // Id: The id of the place. Id string `json:"id,omitempty"` // Kind: Identifies this resource as a place. Value: "plus#place". Kind string `json:"kind,omitempty"` // Position: The position of the place. Position *PlacePosition `json:"position,omitempty"` // ForceSendFields is a list of field names (e.g. "Address") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Address") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *Place) MarshalJSON() ([]byte, error) { type NoMethod Place raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // PlaceAddress: The physical address of the place. type PlaceAddress struct { // Formatted: The formatted address for display. 
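// Struct-valued fields such as Address and Position are pointers and
// may be nil when the server omits them, so guard before dereferencing
// (a minimal sketch, given a previously fetched *Place named place):
//
//	if place.Address != nil && place.Address.Formatted != "" {
//		fmt.Println(place.Address.Formatted)
//	}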
Formatted string `json:"formatted,omitempty"` // ForceSendFields is a list of field names (e.g. "Formatted") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Formatted") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *PlaceAddress) MarshalJSON() ([]byte, error) { type NoMethod PlaceAddress raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // PlacePosition: The position of the place. type PlacePosition struct { // Latitude: The latitude of this position. Latitude float64 `json:"latitude,omitempty"` // Longitude: The longitude of this position. Longitude float64 `json:"longitude,omitempty"` // ForceSendFields is a list of field names (e.g. "Latitude") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Latitude") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *PlacePosition) MarshalJSON() ([]byte, error) { type NoMethod PlacePosition raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } func (s *PlacePosition) UnmarshalJSON(data []byte) error { type NoMethod PlacePosition var s1 struct { Latitude gensupport.JSONFloat64 `json:"latitude"` Longitude gensupport.JSONFloat64 `json:"longitude"` *NoMethod } s1.NoMethod = (*NoMethod)(s) if err := json.Unmarshal(data, &s1); err != nil { return err } s.Latitude = float64(s1.Latitude) s.Longitude = float64(s1.Longitude) return nil } type PlusAclentryResource struct { // DisplayName: A descriptive name for this entry. Suitable for display. DisplayName string `json:"displayName,omitempty"` // Id: The ID of the entry. For entries of type "person" or "circle", // this is the ID of the resource. For other types, this property is not // set. Id string `json:"id,omitempty"` // Type: The type of entry describing to whom access is granted. // Possible values are: // - "person" - Access to an individual. // - "circle" - Access to members of a circle. // - "myCircles" - Access to members of all the person's circles. // - "extendedCircles" - Access to members of all the person's circles, // plus all of the people in their circles. 
// - "domain" - Access to members of the person's Google Apps domain. // - "public" - Access to anyone on the web. Type string `json:"type,omitempty"` // ForceSendFields is a list of field names (e.g. "DisplayName") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "DisplayName") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *PlusAclentryResource) MarshalJSON() ([]byte, error) { type NoMethod PlusAclentryResource raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // method id "plus.activities.get": type ActivitiesGetCall struct { s *Service activityId string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // Get: Shut down. See https://developers.google.com/+/api-shutdown for // more details. func (r *ActivitiesService) Get(activityId string) *ActivitiesGetCall { c := &ActivitiesGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.activityId = activityId return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ActivitiesGetCall) Fields(s ...googleapi.Field) *ActivitiesGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. func (c *ActivitiesGetCall) IfNoneMatch(entityTag string) *ActivitiesGetCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *ActivitiesGetCall) Context(ctx context.Context) *ActivitiesGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ActivitiesGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ActivitiesGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "activities/{activityId}") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "activityId": c.activityId, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "plus.activities.get" call. // Exactly one of *Activity or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Activity.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. func (c *ActivitiesGetCall) Do(opts ...googleapi.CallOption) (*Activity, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Activity{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Shut down. See https://developers.google.com/+/api-shutdown for more details.", // "httpMethod": "GET", // "id": "plus.activities.get", // "parameterOrder": [ // "activityId" // ], // "parameters": { // "activityId": { // "description": "The ID of the activity to get.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "activities/{activityId}", // "response": { // "$ref": "Activity" // }, // "scopes": [ // "https://www.googleapis.com/auth/plus.login", // "https://www.googleapis.com/auth/plus.me" // ] // } } // method id "plus.activities.list": type ActivitiesListCall struct { s *Service userId string collection string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // List: Shut down. See https://developers.google.com/+/api-shutdown for // more details. func (r *ActivitiesService) List(userId string, collection string) *ActivitiesListCall { c := &ActivitiesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.userId = userId c.collection = collection return c } // MaxResults sets the optional parameter "maxResults": The maximum // number of activities to include in the response, which is used for // paging. For any response, the actual number returned might be less // than the specified maxResults. func (c *ActivitiesListCall) MaxResults(maxResults int64) *ActivitiesListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } // PageToken sets the optional parameter "pageToken": The continuation // token, which is used to page through large result sets. To get the // next page of results, set this parameter to the value of // "nextPageToken" from the previous response. func (c *ActivitiesListCall) PageToken(pageToken string) *ActivitiesListCall { c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
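// For example (a minimal sketch; svc is a *Service value constructed
// elsewhere, and the field expression uses the partial-response syntax
// linked above):
//
//	feed, err := svc.Activities.List("me", "public").
//		Fields("nextPageToken", "items(id,url,object/content)").
//		Do()
//	if err != nil {
//		// handle error
//	}
//	_ = feed // only the requested fields are populated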
func (c *ActivitiesListCall) Fields(s ...googleapi.Field) *ActivitiesListCall {
	c.urlParams_.Set("fields", googleapi.CombineFields(s))
	return c
}

// IfNoneMatch sets the optional parameter which makes the operation
// fail if the object's ETag matches the given value. This is useful for
// getting updates only after the object has changed since the last
// request. Use googleapi.IsNotModified to check whether the response
// error from Do is the result of If-None-Match.
func (c *ActivitiesListCall) IfNoneMatch(entityTag string) *ActivitiesListCall {
	c.ifNoneMatch_ = entityTag
	return c
}

// Context sets the context to be used in this call's Do method. Any
// pending HTTP request will be aborted if the provided context is
// canceled.
func (c *ActivitiesListCall) Context(ctx context.Context) *ActivitiesListCall {
	c.ctx_ = ctx
	return c
}

// Header returns an http.Header that can be modified by the caller to
// add HTTP headers to the request.
func (c *ActivitiesListCall) Header() http.Header {
	if c.header_ == nil {
		c.header_ = make(http.Header)
	}
	return c.header_
}

func (c *ActivitiesListCall) doRequest(alt string) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514")
	for k, v := range c.header_ {
		reqHeaders[k] = v
	}
	reqHeaders.Set("User-Agent", c.s.userAgent())
	if c.ifNoneMatch_ != "" {
		reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
	}
	var body io.Reader = nil
	c.urlParams_.Set("alt", alt)
	c.urlParams_.Set("prettyPrint", "false")
	urls := googleapi.ResolveRelative(c.s.BasePath, "people/{userId}/activities/{collection}")
	urls += "?" + c.urlParams_.Encode()
	req, err := http.NewRequest("GET", urls, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	googleapi.Expand(req.URL, map[string]string{
		"userId":     c.userId,
		"collection": c.collection,
	})
	return gensupport.SendRequest(c.ctx_, c.s.client, req)
}

// Do executes the "plus.activities.list" call.
// Exactly one of *ActivityFeed or error will be non-nil. Any non-2xx
// status code is an error. Response headers are in either
// *ActivityFeed.ServerResponse.Header or (if a response was returned at
// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified
// to check whether the returned error was because
// http.StatusNotModified was returned.
func (c *ActivitiesListCall) Do(opts ...googleapi.CallOption) (*ActivityFeed, error) {
	gensupport.SetOptions(c.urlParams_, opts...)
	res, err := c.doRequest("json")
	if res != nil && res.StatusCode == http.StatusNotModified {
		if res.Body != nil {
			res.Body.Close()
		}
		return nil, &googleapi.Error{
			Code:   res.StatusCode,
			Header: res.Header,
		}
	}
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	ret := &ActivityFeed{
		ServerResponse: googleapi.ServerResponse{
			Header:         res.Header,
			HTTPStatusCode: res.StatusCode,
		},
	}
	target := &ret
	if err := gensupport.DecodeResponse(target, res); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Shut down. See https://developers.google.com/+/api-shutdown for more details.",
	//   "httpMethod": "GET",
	//   "id": "plus.activities.list",
	//   "parameterOrder": [
	//     "userId",
	//     "collection"
	//   ],
	//   "parameters": {
	//     "collection": {
	//       "description": "The collection of activities to list.",
	//       "enum": [
	//         "public"
	//       ],
	//       "enumDescriptions": [
	//         "All public activities created by the specified user."
// ], // "location": "path", // "required": true, // "type": "string" // }, // "maxResults": { // "default": "20", // "description": "The maximum number of activities to include in the response, which is used for paging. For any response, the actual number returned might be less than the specified maxResults.", // "format": "uint32", // "location": "query", // "maximum": "100", // "minimum": "1", // "type": "integer" // }, // "pageToken": { // "description": "The continuation token, which is used to page through large result sets. To get the next page of results, set this parameter to the value of \"nextPageToken\" from the previous response.", // "location": "query", // "type": "string" // }, // "userId": { // "description": "The ID of the user to get activities for. The special value \"me\" can be used to indicate the authenticated user.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "people/{userId}/activities/{collection}", // "response": { // "$ref": "ActivityFeed" // }, // "scopes": [ // "https://www.googleapis.com/auth/plus.login", // "https://www.googleapis.com/auth/plus.me" // ] // } } // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. func (c *ActivitiesListCall) Pages(ctx context.Context, f func(*ActivityFeed) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { x, err := c.Do() if err != nil { return err } if err := f(x); err != nil { return err } if x.NextPageToken == "" { return nil } c.PageToken(x.NextPageToken) } } // method id "plus.activities.search": type ActivitiesSearchCall struct { s *Service urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // Search: Shut down. See https://developers.google.com/+/api-shutdown // for more details. func (r *ActivitiesService) Search(query string) *ActivitiesSearchCall { c := &ActivitiesSearchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.urlParams_.Set("query", query) return c } // Language sets the optional parameter "language": Specify the // preferred language to search with. See search language codes for // available values. func (c *ActivitiesSearchCall) Language(language string) *ActivitiesSearchCall { c.urlParams_.Set("language", language) return c } // MaxResults sets the optional parameter "maxResults": The maximum // number of activities to include in the response, which is used for // paging. For any response, the actual number returned might be less // than the specified maxResults. func (c *ActivitiesSearchCall) MaxResults(maxResults int64) *ActivitiesSearchCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } // OrderBy sets the optional parameter "orderBy": Specifies how to order // search results. // // Possible values: // "best" - Sort activities by relevance to the user, most relevant // first. // "recent" (default) - Sort activities by published date, most recent // first. func (c *ActivitiesSearchCall) OrderBy(orderBy string) *ActivitiesSearchCall { c.urlParams_.Set("orderBy", orderBy) return c } // PageToken sets the optional parameter "pageToken": The continuation // token, which is used to page through large result sets. To get the // next page of results, set this parameter to the value of // "nextPageToken" from the previous response. This token can be of any // length. 
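// For example, to page through search results by hand (a minimal
// sketch; the Pages helper defined on these call types automates the
// same loop, and svc is an assumed *Service value):
//
//	call := svc.Activities.Search("espresso").MaxResults(20)
//	for {
//		feed, err := call.Do()
//		if err != nil {
//			break // handle the error in real code
//		}
//		// ... consume feed.Items ...
//		if feed.NextPageToken == "" {
//			break
//		}
//		call.PageToken(feed.NextPageToken)
//	}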
func (c *ActivitiesSearchCall) PageToken(pageToken string) *ActivitiesSearchCall { c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ActivitiesSearchCall) Fields(s ...googleapi.Field) *ActivitiesSearchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. func (c *ActivitiesSearchCall) IfNoneMatch(entityTag string) *ActivitiesSearchCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *ActivitiesSearchCall) Context(ctx context.Context) *ActivitiesSearchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ActivitiesSearchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ActivitiesSearchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "activities") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "plus.activities.search" call. // Exactly one of *ActivityFeed or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *ActivityFeed.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. func (c *ActivitiesSearchCall) Do(opts ...googleapi.CallOption) (*ActivityFeed, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &ActivityFeed{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Shut down. 
See https://developers.google.com/+/api-shutdown for more details.", // "httpMethod": "GET", // "id": "plus.activities.search", // "parameterOrder": [ // "query" // ], // "parameters": { // "language": { // "default": "en-US", // "description": "Specify the preferred language to search with. See search language codes for available values.", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "10", // "description": "The maximum number of activities to include in the response, which is used for paging. For any response, the actual number returned might be less than the specified maxResults.", // "format": "uint32", // "location": "query", // "maximum": "20", // "minimum": "1", // "type": "integer" // }, // "orderBy": { // "default": "recent", // "description": "Specifies how to order search results.", // "enum": [ // "best", // "recent" // ], // "enumDescriptions": [ // "Sort activities by relevance to the user, most relevant first.", // "Sort activities by published date, most recent first." // ], // "location": "query", // "type": "string" // }, // "pageToken": { // "description": "The continuation token, which is used to page through large result sets. To get the next page of results, set this parameter to the value of \"nextPageToken\" from the previous response. This token can be of any length.", // "location": "query", // "type": "string" // }, // "query": { // "description": "Full-text search query string.", // "location": "query", // "required": true, // "type": "string" // } // }, // "path": "activities", // "response": { // "$ref": "ActivityFeed" // }, // "scopes": [ // "https://www.googleapis.com/auth/plus.login", // "https://www.googleapis.com/auth/plus.me" // ] // } } // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. func (c *ActivitiesSearchCall) Pages(ctx context.Context, f func(*ActivityFeed) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { x, err := c.Do() if err != nil { return err } if err := f(x); err != nil { return err } if x.NextPageToken == "" { return nil } c.PageToken(x.NextPageToken) } } // method id "plus.comments.get": type CommentsGetCall struct { s *Service commentId string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // Get: Shut down. See https://developers.google.com/+/api-shutdown for // more details. func (r *CommentsService) Get(commentId string) *CommentsGetCall { c := &CommentsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.commentId = commentId return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *CommentsGetCall) Fields(s ...googleapi.Field) *CommentsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. func (c *CommentsGetCall) IfNoneMatch(entityTag string) *CommentsGetCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *CommentsGetCall) Context(ctx context.Context) *CommentsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *CommentsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *CommentsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "comments/{commentId}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "commentId": c.commentId, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "plus.comments.get" call. // Exactly one of *Comment or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Comment.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. func (c *CommentsGetCall) Do(opts ...googleapi.CallOption) (*Comment, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Comment{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Shut down. See https://developers.google.com/+/api-shutdown for more details.", // "httpMethod": "GET", // "id": "plus.comments.get", // "parameterOrder": [ // "commentId" // ], // "parameters": { // "commentId": { // "description": "The ID of the comment to get.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "comments/{commentId}", // "response": { // "$ref": "Comment" // }, // "scopes": [ // "https://www.googleapis.com/auth/plus.login", // "https://www.googleapis.com/auth/plus.me" // ] // } } // method id "plus.comments.list": type CommentsListCall struct { s *Service activityId string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // List: Shut down. See https://developers.google.com/+/api-shutdown for // more details. func (r *CommentsService) List(activityId string) *CommentsListCall { c := &CommentsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.activityId = activityId return c } // MaxResults sets the optional parameter "maxResults": The maximum // number of comments to include in the response, which is used for // paging. 
For any response, the actual number returned might be less // than the specified maxResults. func (c *CommentsListCall) MaxResults(maxResults int64) *CommentsListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } // PageToken sets the optional parameter "pageToken": The continuation // token, which is used to page through large result sets. To get the // next page of results, set this parameter to the value of // "nextPageToken" from the previous response. func (c *CommentsListCall) PageToken(pageToken string) *CommentsListCall { c.urlParams_.Set("pageToken", pageToken) return c } // SortOrder sets the optional parameter "sortOrder": The order in which // to sort the list of comments. // // Possible values: // "ascending" (default) - Sort oldest comments first. // "descending" - Sort newest comments first. func (c *CommentsListCall) SortOrder(sortOrder string) *CommentsListCall { c.urlParams_.Set("sortOrder", sortOrder) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *CommentsListCall) Fields(s ...googleapi.Field) *CommentsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. func (c *CommentsListCall) IfNoneMatch(entityTag string) *CommentsListCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *CommentsListCall) Context(ctx context.Context) *CommentsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *CommentsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *CommentsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "activities/{activityId}/comments") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "activityId": c.activityId, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "plus.comments.list" call. // Exactly one of *CommentFeed or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *CommentFeed.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. func (c *CommentsListCall) Do(opts ...googleapi.CallOption) (*CommentFeed, error) { gensupport.SetOptions(c.urlParams_, opts...) 
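	// The 304 branch just below converts http.StatusNotModified into a
	// *googleapi.Error. A hedged caller-side sketch of the conditional-request
	// pattern this enables (svc, activityID and etag are assumed variables,
	// not part of this generated file):
	//
	//	feed, err := svc.Comments.List(activityID).IfNoneMatch(etag).Do()
	//	if googleapi.IsNotModified(err) {
	//		// the previously cached CommentFeed is still current
	//	}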
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &CommentFeed{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Shut down. See https://developers.google.com/+/api-shutdown for more details.", // "httpMethod": "GET", // "id": "plus.comments.list", // "parameterOrder": [ // "activityId" // ], // "parameters": { // "activityId": { // "description": "The ID of the activity to get comments for.", // "location": "path", // "required": true, // "type": "string" // }, // "maxResults": { // "default": "20", // "description": "The maximum number of comments to include in the response, which is used for paging. For any response, the actual number returned might be less than the specified maxResults.", // "format": "uint32", // "location": "query", // "maximum": "500", // "minimum": "0", // "type": "integer" // }, // "pageToken": { // "description": "The continuation token, which is used to page through large result sets. To get the next page of results, set this parameter to the value of \"nextPageToken\" from the previous response.", // "location": "query", // "type": "string" // }, // "sortOrder": { // "default": "ascending", // "description": "The order in which to sort the list of comments.", // "enum": [ // "ascending", // "descending" // ], // "enumDescriptions": [ // "Sort oldest comments first.", // "Sort newest comments first." // ], // "location": "query", // "type": "string" // } // }, // "path": "activities/{activityId}/comments", // "response": { // "$ref": "CommentFeed" // }, // "scopes": [ // "https://www.googleapis.com/auth/plus.login", // "https://www.googleapis.com/auth/plus.me" // ] // } } // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. func (c *CommentsListCall) Pages(ctx context.Context, f func(*CommentFeed) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { x, err := c.Do() if err != nil { return err } if err := f(x); err != nil { return err } if x.NextPageToken == "" { return nil } c.PageToken(x.NextPageToken) } } // method id "plus.people.get": type PeopleGetCall struct { s *Service userId string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // Get: Get a person's profile. If your app uses scope // https://www.googleapis.com/auth/plus.login, this method is guaranteed // to return ageRange and language. func (r *PeopleService) Get(userId string) *PeopleGetCall { c := &PeopleGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.userId = userId return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
func (c *PeopleGetCall) Fields(s ...googleapi.Field) *PeopleGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. func (c *PeopleGetCall) IfNoneMatch(entityTag string) *PeopleGetCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *PeopleGetCall) Context(ctx context.Context) *PeopleGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *PeopleGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *PeopleGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "people/{userId}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "userId": c.userId, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "plus.people.get" call. // Exactly one of *Person or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Person.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. func (c *PeopleGetCall) Do(opts ...googleapi.CallOption) (*Person, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Person{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Get a person's profile. If your app uses scope https://www.googleapis.com/auth/plus.login, this method is guaranteed to return ageRange and language.", // "httpMethod": "GET", // "id": "plus.people.get", // "parameterOrder": [ // "userId" // ], // "parameters": { // "userId": { // "description": "The ID of the person to get the profile for. 
The special value \"me\" can be used to indicate the authenticated user.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "people/{userId}", // "response": { // "$ref": "Person" // }, // "scopes": [ // "https://www.googleapis.com/auth/plus.login", // "https://www.googleapis.com/auth/plus.me", // "https://www.googleapis.com/auth/userinfo.email", // "https://www.googleapis.com/auth/userinfo.profile" // ] // } } // method id "plus.people.list": type PeopleListCall struct { s *Service userId string collection string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // List: List all of the people in the specified collection. func (r *PeopleService) List(userId string, collection string) *PeopleListCall { c := &PeopleListCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.userId = userId c.collection = collection return c } // MaxResults sets the optional parameter "maxResults": The maximum // number of people to include in the response, which is used for // paging. For any response, the actual number returned might be less // than the specified maxResults. func (c *PeopleListCall) MaxResults(maxResults int64) *PeopleListCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } // OrderBy sets the optional parameter "orderBy": The order to return // people in. // // Possible values: // "alphabetical" - Order the people by their display name. // "best" - Order people based on the relevence to the viewer. func (c *PeopleListCall) OrderBy(orderBy string) *PeopleListCall { c.urlParams_.Set("orderBy", orderBy) return c } // PageToken sets the optional parameter "pageToken": The continuation // token, which is used to page through large result sets. To get the // next page of results, set this parameter to the value of // "nextPageToken" from the previous response. func (c *PeopleListCall) PageToken(pageToken string) *PeopleListCall { c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *PeopleListCall) Fields(s ...googleapi.Field) *PeopleListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. func (c *PeopleListCall) IfNoneMatch(entityTag string) *PeopleListCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *PeopleListCall) Context(ctx context.Context) *PeopleListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
func (c *PeopleListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *PeopleListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "people/{userId}/people/{collection}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "userId": c.userId, "collection": c.collection, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "plus.people.list" call. // Exactly one of *PeopleFeed or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *PeopleFeed.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. func (c *PeopleListCall) Do(opts ...googleapi.CallOption) (*PeopleFeed, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &PeopleFeed{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "List all of the people in the specified collection.", // "httpMethod": "GET", // "id": "plus.people.list", // "parameterOrder": [ // "userId", // "collection" // ], // "parameters": { // "collection": { // "description": "The collection of people to list.", // "enum": [ // "connected", // "visible" // ], // "enumDescriptions": [ // "The list of visible people in the authenticated user's circles who also use the requesting app. This list is limited to users who made their app activities visible to the authenticated user.", // "The list of people who this user has added to one or more circles, limited to the circles visible to the requesting application." // ], // "location": "path", // "required": true, // "type": "string" // }, // "maxResults": { // "default": "100", // "description": "The maximum number of people to include in the response, which is used for paging. For any response, the actual number returned might be less than the specified maxResults.", // "format": "uint32", // "location": "query", // "maximum": "100", // "minimum": "1", // "type": "integer" // }, // "orderBy": { // "description": "The order to return people in.", // "enum": [ // "alphabetical", // "best" // ], // "enumDescriptions": [ // "Order the people by their display name.", // "Order people based on the relevence to the viewer." 
// ], // "location": "query", // "type": "string" // }, // "pageToken": { // "description": "The continuation token, which is used to page through large result sets. To get the next page of results, set this parameter to the value of \"nextPageToken\" from the previous response.", // "location": "query", // "type": "string" // }, // "userId": { // "description": "Get the collection of people for the person identified. Use \"me\" to indicate the authenticated user.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "people/{userId}/people/{collection}", // "response": { // "$ref": "PeopleFeed" // }, // "scopes": [ // "https://www.googleapis.com/auth/plus.login", // "https://www.googleapis.com/auth/plus.me" // ] // } } // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. func (c *PeopleListCall) Pages(ctx context.Context, f func(*PeopleFeed) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { x, err := c.Do() if err != nil { return err } if err := f(x); err != nil { return err } if x.NextPageToken == "" { return nil } c.PageToken(x.NextPageToken) } } // method id "plus.people.listByActivity": type PeopleListByActivityCall struct { s *Service activityId string collection string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // ListByActivity: Shut down. See // https://developers.google.com/+/api-shutdown for more details. func (r *PeopleService) ListByActivity(activityId string, collection string) *PeopleListByActivityCall { c := &PeopleListByActivityCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.activityId = activityId c.collection = collection return c } // MaxResults sets the optional parameter "maxResults": The maximum // number of people to include in the response, which is used for // paging. For any response, the actual number returned might be less // than the specified maxResults. func (c *PeopleListByActivityCall) MaxResults(maxResults int64) *PeopleListByActivityCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } // PageToken sets the optional parameter "pageToken": The continuation // token, which is used to page through large result sets. To get the // next page of results, set this parameter to the value of // "nextPageToken" from the previous response. func (c *PeopleListByActivityCall) PageToken(pageToken string) *PeopleListByActivityCall { c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *PeopleListByActivityCall) Fields(s ...googleapi.Field) *PeopleListByActivityCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. func (c *PeopleListByActivityCall) IfNoneMatch(entityTag string) *PeopleListByActivityCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *PeopleListByActivityCall) Context(ctx context.Context) *PeopleListByActivityCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *PeopleListByActivityCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *PeopleListByActivityCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "activities/{activityId}/people/{collection}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "activityId": c.activityId, "collection": c.collection, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "plus.people.listByActivity" call. // Exactly one of *PeopleFeed or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *PeopleFeed.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. func (c *PeopleListByActivityCall) Do(opts ...googleapi.CallOption) (*PeopleFeed, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &PeopleFeed{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Shut down. See https://developers.google.com/+/api-shutdown for more details.", // "httpMethod": "GET", // "id": "plus.people.listByActivity", // "parameterOrder": [ // "activityId", // "collection" // ], // "parameters": { // "activityId": { // "description": "The ID of the activity to get the list of people for.", // "location": "path", // "required": true, // "type": "string" // }, // "collection": { // "description": "The collection of people to list.", // "enum": [ // "plusoners", // "resharers" // ], // "enumDescriptions": [ // "List all people who have +1'd this activity.", // "List all people who have reshared this activity." // ], // "location": "path", // "required": true, // "type": "string" // }, // "maxResults": { // "default": "20", // "description": "The maximum number of people to include in the response, which is used for paging. 
For any response, the actual number returned might be less than the specified maxResults.", // "format": "uint32", // "location": "query", // "maximum": "100", // "minimum": "1", // "type": "integer" // }, // "pageToken": { // "description": "The continuation token, which is used to page through large result sets. To get the next page of results, set this parameter to the value of \"nextPageToken\" from the previous response.", // "location": "query", // "type": "string" // } // }, // "path": "activities/{activityId}/people/{collection}", // "response": { // "$ref": "PeopleFeed" // }, // "scopes": [ // "https://www.googleapis.com/auth/plus.login", // "https://www.googleapis.com/auth/plus.me" // ] // } } // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. func (c *PeopleListByActivityCall) Pages(ctx context.Context, f func(*PeopleFeed) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { x, err := c.Do() if err != nil { return err } if err := f(x); err != nil { return err } if x.NextPageToken == "" { return nil } c.PageToken(x.NextPageToken) } } // method id "plus.people.search": type PeopleSearchCall struct { s *Service urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // Search: Shut down. See https://developers.google.com/+/api-shutdown // for more details. func (r *PeopleService) Search(query string) *PeopleSearchCall { c := &PeopleSearchCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.urlParams_.Set("query", query) return c } // Language sets the optional parameter "language": Specify the // preferred language to search with. See search language codes for // available values. func (c *PeopleSearchCall) Language(language string) *PeopleSearchCall { c.urlParams_.Set("language", language) return c } // MaxResults sets the optional parameter "maxResults": The maximum // number of people to include in the response, which is used for // paging. For any response, the actual number returned might be less // than the specified maxResults. func (c *PeopleSearchCall) MaxResults(maxResults int64) *PeopleSearchCall { c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) return c } // PageToken sets the optional parameter "pageToken": The continuation // token, which is used to page through large result sets. To get the // next page of results, set this parameter to the value of // "nextPageToken" from the previous response. This token can be of any // length. func (c *PeopleSearchCall) PageToken(pageToken string) *PeopleSearchCall { c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *PeopleSearchCall) Fields(s ...googleapi.Field) *PeopleSearchCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. 
func (c *PeopleSearchCall) IfNoneMatch(entityTag string) *PeopleSearchCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *PeopleSearchCall) Context(ctx context.Context) *PeopleSearchCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *PeopleSearchCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *PeopleSearchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20200514") for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "people") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "plus.people.search" call. // Exactly one of *PeopleFeed or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *PeopleFeed.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. func (c *PeopleSearchCall) Do(opts ...googleapi.CallOption) (*PeopleFeed, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &PeopleFeed{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Shut down. See https://developers.google.com/+/api-shutdown for more details.", // "httpMethod": "GET", // "id": "plus.people.search", // "parameterOrder": [ // "query" // ], // "parameters": { // "language": { // "default": "en-US", // "description": "Specify the preferred language to search with. See search language codes for available values.", // "location": "query", // "type": "string" // }, // "maxResults": { // "default": "25", // "description": "The maximum number of people to include in the response, which is used for paging. For any response, the actual number returned might be less than the specified maxResults.", // "format": "uint32", // "location": "query", // "maximum": "50", // "minimum": "1", // "type": "integer" // }, // "pageToken": { // "description": "The continuation token, which is used to page through large result sets. To get the next page of results, set this parameter to the value of \"nextPageToken\" from the previous response. 
This token can be of any length.", // "location": "query", // "type": "string" // }, // "query": { // "description": "Specify a query string for full text search of public text in all profiles.", // "location": "query", // "required": true, // "type": "string" // } // }, // "path": "people", // "response": { // "$ref": "PeopleFeed" // }, // "scopes": [ // "https://www.googleapis.com/auth/plus.login", // "https://www.googleapis.com/auth/plus.me" // ] // } } // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. func (c *PeopleSearchCall) Pages(ctx context.Context, f func(*PeopleFeed) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { x, err := c.Do() if err != nil { return err } if err := f(x); err != nil { return err } if x.NextPageToken == "" { return nil } c.PageToken(x.NextPageToken) } }
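// A minimal, hedged sketch of driving the generated calls above. The import
// path and NewService constructor follow the conventional generated-client
// layout and are assumed rather than shown in this file; the Plus API itself
// is shut down, so this is illustrative only.
//
//	import (
//		"context"
//		"fmt"
//
//		"google.golang.org/api/option"
//		plus "google.golang.org/api/plus/v1"
//	)
//
//	func listPublicActivities(ctx context.Context, apiKey string) error {
//		svc, err := plus.NewService(ctx, option.WithAPIKey(apiKey))
//		if err != nil {
//			return err
//		}
//		// Pages follows nextPageToken for us, invoking the callback per page.
//		return svc.Activities.List("me", "public").MaxResults(20).
//			Pages(ctx, func(feed *plus.ActivityFeed) error {
//				fmt.Println(len(feed.Items), "activities in this page")
//				return nil
//			})
//	}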
{ rs := &ActivitiesService{s: s} return rs }
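// The brace-delimited fragment above is the body of the conventional
// per-collection constructor in generated clients. For context, a sketch of
// the declarations it completes (shapes assumed from the standard generated
// layout, not shown in this record):

type ActivitiesService struct {
	s *Service
}

func NewActivitiesService(s *Service) *ActivitiesService {
	rs := &ActivitiesService{s: s}
	return rs
}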
views.py
import json

from django.http import HttpResponse, HttpResponseRedirect
from django.views.generic.base import View
from django.conf import settings
from django.db.models import get_model
from haystack.query import SearchQuerySet
from haystack import views
from purl import URL

Product = get_model('catalogue', 'Product')


class SuggestionsView(View):
    """
    Auto-suggest view

    Returns the suggestions in JSON format (especially suited for
    consumption by jQuery autocomplete).
    """
    suggest_limit = settings.OSCAR_SEARCH_SUGGEST_LIMIT

    def get(self, request):
        context = self.get_context_data()
        return self.render_to_response(context)

    def get_context_data(self):
        """Create the list of suggestions"""
        # Use .get() so a request without 'query_term' returns an empty
        # suggestion list instead of raising MultiValueDictKeyError.
        query_term = self.request.GET.get('query_term', '')
        query_set = SearchQuerySet().filter(text__contains=query_term)[
            :self.suggest_limit]
        context = []
        })
        return context

    def render_to_response(self, context):
        "Returns a JSON response containing 'context' as payload"
        return self.get_json_response(self.convert_context_to_json(context))

    def get_json_response(self, content, **httpresponse_kwargs):
        "Construct an `HttpResponse` object."
        return HttpResponse(content,
                            content_type='application/json',
                            **httpresponse_kwargs)

    def convert_context_to_json(self, context):
        "Convert the context into a JSON object"
        return json.dumps(context)


class FacetedSearchView(views.FacetedSearchView):

    def extra_context(self):
        extra = super(FacetedSearchView, self).extra_context()

        if 'fields' not in extra['facets']:
            # Looks like Solr is not responding correctly
            return extra

        # Convert facet data into a more useful datastructure

        # Field facets
        facet_data = {}
        base_url = URL(self.request.get_full_path())
        # Split on the first ':' only, as facet values may themselves
        # contain colons.
        selected = dict(
            map(lambda x: x.split(':', 1), self.form.selected_facets))
        for field, facets in extra['facets']['fields'].items():
            facet_data[field] = []
            for name, count in facets:
                # Ignore zero-count facets for field
                if count == 0:
                    continue
                field_filter = '%s_exact' % field
                datum = {
                    'name': name,
                    'count': count}
                if selected.get(field_filter, None) == name:
                    # This filter is selected - build the 'deselect' URL
                    datum['selected'] = True
                    url = base_url.remove_query_param(
                        'selected_facets', '%s:%s' % (
                            field_filter, name))
                    datum['deselect_url'] = url.as_string()
                else:
                    # This filter is not selected - build the 'select' URL
                    datum['selected'] = False
                    url = base_url.append_query_param(
                        'selected_facets', '%s:%s' % (
                            field_filter, name))
                    datum['select_url'] = url.as_string()
                facet_data[field].append(datum)

        # Query facets
        for key, facet in settings.OSCAR_SEARCH_FACETS['queries'].items():
            facet_data[key] = []
            for name, query in facet['queries']:
                field_filter = '%s_exact' % facet['field']
                match = '%s_exact:%s' % (facet['field'], query)
                if match not in extra['facets']['queries']:
                    datum = {
                        'name': name,
                        'count': 0,
                    }
                else:
                    datum = {
                        'name': name,
                        'count': extra['facets']['queries'][match],
                    }
                if selected.get(field_filter, None) == query:
                    # Selected
                    datum['selected'] = True
                    url = base_url.remove_query_param(
                        'selected_facets', match)
                    datum['deselect_url'] = url.as_string()
                else:
                    datum['selected'] = False
                    url = base_url.append_query_param(
                        'selected_facets', match)
                    datum['select_url'] = url.as_string()
                facet_data[key].append(datum)

        extra['facet_data'] = facet_data
        return extra


class MultiFacetedSearchView(FacetedSearchView):
    """
    Search view for multifaceted searches
    """
    template = 'search/results.html'

    def __call__(self, request, *args, **kwargs):
        """
        Generates the actual response to the search.

        Relies on internal, overridable methods to construct the response.
        """
        # Look for UPC match
        query = request.GET.get('q', '').strip()
        try:
            item = Product._default_manager.get(upc=query)
            return HttpResponseRedirect(item.get_absolute_url())
        except Product.DoesNotExist:
            pass

        return super(MultiFacetedSearchView, self).__call__(request, *args, **kwargs)

    @property
    def __name__(self):
        return "MultiFacetedSearchView"

    def extra_context(self):
        """
        Adds details about the facets applied
        """
        extra = super(MultiFacetedSearchView, self).extra_context()

        if hasattr(self.form, 'cleaned_data') and 'selected_facets' in self.form.cleaned_data:
            extra['facets_applied'] = []
            for f in self.form.cleaned_data['selected_facets'].split("|"):
                # Split on the first ':' only; strip the '_exact' suffix
                # that haystack appends to facet field names.
                facet = f.split(":", 1)
                extra['facets_applied'].append({
                    'facet': facet[0][:-6],
                    'value': facet[1].strip('"')
                })
        return extra
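# A standalone sketch of the purl round-trip used in extra_context above:
# append_query_param builds a facet's "select" URL and remove_query_param its
# "deselect" URL (the path and facet value here are made-up examples).
if __name__ == '__main__':
    from purl import URL

    base = URL('/search/?q=shoes')
    selected = base.append_query_param('selected_facets', 'rating_exact:4')
    print(selected.as_string())
    deselected = selected.remove_query_param('selected_facets',
                                             'rating_exact:4')
    print(deselected.as_string())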
for item in query_set: context.append({ 'label': item.object.title, 'url': item.object.get_absolute_url(),
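# For reference, a hedged sketch of the payload the suggestion view emits once
# the loop above has filled `context` (the titles and URLs are invented sample
# data, not real catalogue contents):
#
#   >>> json.dumps([
#   ...     {'label': 'Sony PlayStation 4', 'url': '/products/ps4_1/'},
#   ...     {'label': 'Sony Walkman', 'url': '/products/walkman_2/'},
#   ... ])
#   '[{"label": "Sony PlayStation 4", "url": "/products/ps4_1/"}, ...]'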
q_get_adversaries_x_industry.py
from tmc.db import get_db, make_dicts


# Get the list of all adversaries available in the database. (Despite the
# module name, no per-industry filter is applied yet; see the sketch below.)
def
():
    db = get_db()
    try:
        # Return each row as a dict keyed by column name.
        db.row_factory = make_dicts
        query = db.execute(
            'SELECT adversary_id as ID, adversary_name as Name, '
            'adversary_identifiers as Identifiers, '
            'adversary_description as Description '
            'FROM adversaries ORDER BY Name').fetchall()
        return query
    except TypeError:
        # TODO: return a meaningful warning/alert to the caller instead of
        # a bare False.
        return False
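# The query above returns every adversary regardless of industry. A hedged
# sketch of a per-industry variant, assuming a hypothetical link table
# adversaries_x_industries(adversary_id, industry_id) -- the table and column
# names are illustrative, not taken from the real tmc schema:
#
#   query = db.execute(
#       'SELECT a.adversary_id as ID, a.adversary_name as Name '
#       'FROM adversaries a '
#       'JOIN adversaries_x_industries x ON x.adversary_id = a.adversary_id '
#       'WHERE x.industry_id = ? ORDER BY Name', (industry_id,)).fetchall()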
get_adversaries_x_industry
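# make_dicts is imported from tmc.db above. A sketch of the standard sqlite3
# row-factory recipe, on the assumption that tmc.db implements it this way
# (the real implementation is not shown here):

def make_dicts(cursor, row):
    """Map a sqlite3 row to a dict keyed by column name."""
    # cursor.description[i][0] is the name of column i in the result set.
    return dict((cursor.description[i][0], value)
                for i, value in enumerate(row))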
service_provider.go
package saml

import (
	"bytes"
	"compress/flate"
	"crypto/rsa"
	"crypto/tls"
	"crypto/x509"
	"encoding/base64"
	"encoding/xml"
	"errors"
	"fmt"
	"html/template"
	"io/ioutil"
	"net/http"
	"net/url"
	"regexp"
	"time"

	xrv "github.com/mattermost/xml-roundtrip-validator"

	"github.com/beevik/etree"
	dsig "github.com/russellhaering/goxmldsig"
	"github.com/russellhaering/goxmldsig/etreeutils"

	"github.com/crewjam/saml/xmlenc"
)

// NameIDFormat is the format of the NameID.
type NameIDFormat string

// Element returns an XML element representation of n.
func (n NameIDFormat) Element() *etree.Element {
	el := etree.NewElement("")
	el.SetText(string(n))
	return el
}

// Name ID formats
const (
	UnspecifiedNameIDFormat  NameIDFormat = "urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified"
	TransientNameIDFormat    NameIDFormat = "urn:oasis:names:tc:SAML:2.0:nameid-format:transient"
	EmailAddressNameIDFormat NameIDFormat = "urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress"
	PersistentNameIDFormat   NameIDFormat = "urn:oasis:names:tc:SAML:2.0:nameid-format:persistent"
)

// SignatureVerifier verifies a signature.
//
// Can be implemented in order to override ServiceProvider's default
// way of verifying signatures.
type SignatureVerifier interface {
	VerifySignature(validationContext *dsig.ValidationContext, el *etree.Element) error
}

// ServiceProvider implements a SAML service provider.
//
// In SAML, service providers delegate responsibility for identifying
// clients to an identity provider. If you are writing an application
// that uses passwords (or whatever) stored somewhere else, then you
// are a service provider.
//
// See the example directory for an example of a web application using
// the service provider interface.
type ServiceProvider struct {
	// EntityID is optional - if not specified then MetadataURL will be used
	EntityID string

	// Key is the RSA private key we use to sign requests.
	Key *rsa.PrivateKey

	// Certificate is the RSA public part of Key.
	Certificate *x509.Certificate
	// Intermediates is an optional list of intermediate certificates that are
	// appended to the key descriptor in the published metadata.
	Intermediates []*x509.Certificate

	// MetadataURL is the full URL to the metadata endpoint on this host,
	// i.e. https://example.com/saml/metadata
	MetadataURL url.URL

	// AcsURL is the full URL to the SAML Assertion Consumer Service endpoint
	// on this host, i.e. https://example.com/saml/acs
	AcsURL url.URL

	// SloURL is the full URL to the SAML Single Logout endpoint on this host,
	// i.e. https://example.com/saml/slo
	SloURL url.URL

	// IDPMetadata is the metadata from the identity provider.
	IDPMetadata *EntityDescriptor

	// AuthnNameIDFormat is the format used in the NameIDPolicy for
	// authentication requests
	AuthnNameIDFormat NameIDFormat

	// MetadataValidDuration is a duration used to calculate the validUntil
	// attribute in the metadata endpoint
	MetadataValidDuration time.Duration

	// ForceAuthn allows you to force re-authentication of users even if the
	// user has an SSO session at the IdP.
	ForceAuthn *bool

	// AllowIDPInitiated, if true, allows IdP-initiated responses, i.e.
	// responses that do not correspond to an outstanding AuthnRequest.
	AllowIDPInitiated bool

	// SignatureVerifier, if non-nil, allows you to implement an alternative way
	// to verify signatures.
	SignatureVerifier SignatureVerifier

	// SignatureMethod, if non-empty, specifies the method used to sign
	// authentication requests.
	SignatureMethod string
}

// MaxIssueDelay is the longest allowed time between when a SAML assertion is
// issued by the IDP and the time it is received by ParseResponse. This is used
// to prevent old responses from being replayed (while allowing for some clock
// drift between the SP and IDP).
var MaxIssueDelay = time.Second * 90 // MaxClockSkew allows for leeway for clock skew between the IDP and SP when // validating assertions. It defaults to 180 seconds (matches shibboleth). var MaxClockSkew = time.Second * 180 // DefaultValidDuration is how long we assert that the SP metadata is valid. const DefaultValidDuration = time.Hour * 24 * 2 // DefaultCacheDuration is how long we ask the IDP to cache the SP metadata. const DefaultCacheDuration = time.Hour * 24 * 1 // Metadata returns the service provider metadata func (sp *ServiceProvider) Metadata() *EntityDescriptor { validDuration := DefaultValidDuration if sp.MetadataValidDuration > 0 { validDuration = sp.MetadataValidDuration } authnRequestsSigned := len(sp.SignatureMethod) > 0 wantAssertionsSigned := true validUntil := TimeNow().Add(validDuration) var keyDescriptors []KeyDescriptor if sp.Certificate != nil { certBytes := sp.Certificate.Raw for _, intermediate := range sp.Intermediates { certBytes = append(certBytes, intermediate.Raw...) } keyDescriptors = []KeyDescriptor{ { Use: "encryption", KeyInfo: KeyInfo{ Certificate: base64.StdEncoding.EncodeToString(certBytes), }, EncryptionMethods: []EncryptionMethod{ {Algorithm: "http://www.w3.org/2001/04/xmlenc#aes128-cbc"}, {Algorithm: "http://www.w3.org/2001/04/xmlenc#aes192-cbc"}, {Algorithm: "http://www.w3.org/2001/04/xmlenc#aes256-cbc"}, {Algorithm: "http://www.w3.org/2001/04/xmlenc#rsa-oaep-mgf1p"}, }, }, } if len(sp.SignatureMethod) > 0 { keyDescriptors = append(keyDescriptors, KeyDescriptor{ Use: "signing", KeyInfo: KeyInfo{ Certificate: base64.StdEncoding.EncodeToString(certBytes), }, }) } } return &EntityDescriptor{ EntityID: firstSet(sp.EntityID, sp.MetadataURL.String()), ValidUntil: validUntil, SPSSODescriptors: []SPSSODescriptor{ { SSODescriptor: SSODescriptor{ RoleDescriptor: RoleDescriptor{ ProtocolSupportEnumeration: "urn:oasis:names:tc:SAML:2.0:protocol", KeyDescriptors: keyDescriptors, ValidUntil: &validUntil, }, SingleLogoutServices: []Endpoint{ { Binding: HTTPPostBinding, Location: sp.SloURL.String(), ResponseLocation: sp.SloURL.String(), }, }, }, AuthnRequestsSigned: &authnRequestsSigned, WantAssertionsSigned: &wantAssertionsSigned, AssertionConsumerServices: []IndexedEndpoint{ { Binding: HTTPPostBinding, Location: sp.AcsURL.String(), Index: 1, }, }, }, }, } } // MakeRedirectAuthenticationRequest creates a SAML authentication request using // the HTTP-Redirect binding. It returns a URL that we will redirect the user to // in order to start the auth process. 
func (sp *ServiceProvider) MakeRedirectAuthenticationRequest(relayState string) (*url.URL, error) { req, err := sp.MakeAuthenticationRequest(sp.GetSSOBindingLocation(HTTPRedirectBinding), HTTPRedirectBinding) if err != nil { return nil, err } return req.Redirect(relayState, sp) } // Redirect returns a URL suitable for using the redirect binding with the request func (req *AuthnRequest) Redirect(relayState string, sp *ServiceProvider) (*url.URL, error) { w := &bytes.Buffer{} w1 := base64.NewEncoder(base64.StdEncoding, w) w2, _ := flate.NewWriter(w1, 9) doc := etree.NewDocument() doc.SetRoot(req.Element()) if _, err := doc.WriteTo(w2); err != nil { panic(err) } w2.Close() w1.Close() rv, _ := url.Parse(req.Destination) // We can't depend on Query().set() as order matters for signing query := rv.RawQuery if len(query) > 0 { query += "&SAMLRequest=" + url.QueryEscape(string(w.Bytes())) } else { query += "SAMLRequest=" + url.QueryEscape(string(w.Bytes())) } if relayState != "" { query += "&RelayState=" + relayState } if len(sp.SignatureMethod) > 0 { query += "&SigAlg=" + url.QueryEscape(sp.SignatureMethod) signingContext, err := GetSigningContext(sp) if err != nil { return nil, err } sig, err := signingContext.SignString(query) if err != nil { return nil, err } query += "&Signature=" + url.QueryEscape(base64.StdEncoding.EncodeToString(sig)) } rv.RawQuery = query return rv, nil } // GetSSOBindingLocation returns URL for the IDP's Single Sign On Service binding // of the specified type (HTTPRedirectBinding or HTTPPostBinding) func (sp *ServiceProvider) GetSSOBindingLocation(binding string) string { for _, idpSSODescriptor := range sp.IDPMetadata.IDPSSODescriptors { for _, singleSignOnService := range idpSSODescriptor.SingleSignOnServices { if singleSignOnService.Binding == binding { return singleSignOnService.Location } } } return "" } // GetSLOBindingLocation returns URL for the IDP's Single Log Out Service binding // of the specified type (HTTPRedirectBinding or HTTPPostBinding) func (sp *ServiceProvider) GetSLOBindingLocation(binding string) string { for _, idpSSODescriptor := range sp.IDPMetadata.IDPSSODescriptors { for _, singleLogoutService := range idpSSODescriptor.SingleLogoutServices { if singleLogoutService.Binding == binding { return singleLogoutService.Location } } } return "" } // getIDPSigningCerts returns the certificates which we can use to verify things // signed by the IDP in PEM format, or nil if no such certificate is found. func (sp *ServiceProvider) getIDPSigningCerts() ([]*x509.Certificate, error) { var certStrs []string for _, idpSSODescriptor := range sp.IDPMetadata.IDPSSODescriptors { for _, keyDescriptor := range idpSSODescriptor.KeyDescriptors { if keyDescriptor.Use == "signing" { certStrs = append(certStrs, keyDescriptor.KeyInfo.Certificate) } } } // If there are no explicitly signing certs, just return the first // non-empty cert we find. 
	if len(certStrs) == 0 {
		for _, idpSSODescriptor := range sp.IDPMetadata.IDPSSODescriptors {
			for _, keyDescriptor := range idpSSODescriptor.KeyDescriptors {
				if keyDescriptor.Use == "" && keyDescriptor.KeyInfo.Certificate != "" {
					certStrs = append(certStrs, keyDescriptor.KeyInfo.Certificate)
					break
				}
			}
		}
	}

	if len(certStrs) == 0 {
		return nil, errors.New("cannot find any signing certificate in the IDP SSO descriptor")
	}

	var certs []*x509.Certificate

	// clean up whitespace
	regex := regexp.MustCompile(`\s+`)
	for _, certStr := range certStrs {
		certStr = regex.ReplaceAllString(certStr, "")
		certBytes, err := base64.StdEncoding.DecodeString(certStr)
		if err != nil {
			return nil, fmt.Errorf("cannot parse certificate: %s", err)
		}

		parsedCert, err := x509.ParseCertificate(certBytes)
		if err != nil {
			return nil, err
		}
		certs = append(certs, parsedCert)
	}

	return certs, nil
}

// MakeAuthenticationRequest produces a new AuthnRequest object to send to the idpURL
// that uses the specified binding (HTTPRedirectBinding or HTTPPostBinding)
func (sp *ServiceProvider) MakeAuthenticationRequest(idpURL string, binding string) (*AuthnRequest, error) {

	allowCreate := true
	nameIDFormat := sp.nameIDFormat()
	req := AuthnRequest{
		AssertionConsumerServiceURL: sp.AcsURL.String(),
		Destination:                 idpURL,
		ProtocolBinding:             HTTPPostBinding, // default binding for the response
		ID:                          fmt.Sprintf("id-%x", randomBytes(20)),
		IssueInstant:                TimeNow(),
		Version:                     "2.0",
		Issuer: &Issuer{
			Format: "urn:oasis:names:tc:SAML:2.0:nameid-format:entity",
			Value:  firstSet(sp.EntityID, sp.MetadataURL.String()),
		},
		NameIDPolicy: &NameIDPolicy{
			AllowCreate: &allowCreate,
			// TODO(ross): figure out exactly what policy we need
			// urn:mace:shibboleth:1.0:nameIdentifier
			// urn:oasis:names:tc:SAML:2.0:nameid-format:transient
			Format: &nameIDFormat,
		},
		ForceAuthn: sp.ForceAuthn,
	}

	// We don't need to sign the XML document if the IDP uses the HTTP-Redirect binding
	if len(sp.SignatureMethod) > 0 && binding == HTTPPostBinding {
		if err := sp.SignAuthnRequest(&req); err != nil {
			return nil, err
		}
	}

	return &req, nil
}

// GetSigningContext returns a dsig.SigningContext initialized based on the Service Provider's configuration
func GetSigningContext(sp *ServiceProvider) (*dsig.SigningContext, error) {
	keyPair := tls.Certificate{
		Certificate: [][]byte{sp.Certificate.Raw},
		PrivateKey:  sp.Key,
		Leaf:        sp.Certificate,
	}
	// TODO: add intermediates for SP
	//for _, cert := range sp.Intermediates {
	//	keyPair.Certificate = append(keyPair.Certificate, cert.Raw)
	//}
	keyStore := dsig.TLSCertKeyStore(keyPair)

	if sp.SignatureMethod != dsig.RSASHA1SignatureMethod &&
		sp.SignatureMethod != dsig.RSASHA256SignatureMethod &&
		sp.SignatureMethod != dsig.RSASHA512SignatureMethod {
		return nil, fmt.Errorf("invalid signing method %s", sp.SignatureMethod)
	}
	signatureMethod := sp.SignatureMethod
	signingContext := dsig.NewDefaultSigningContext(keyStore)
	signingContext.Canonicalizer = dsig.MakeC14N10ExclusiveCanonicalizerWithPrefixList(canonicalizerPrefixList)
	if err := signingContext.SetSignatureMethod(signatureMethod); err != nil {
		return nil, err
	}

	return signingContext, nil
}

// SignAuthnRequest adds the `Signature` element to the `AuthnRequest`.
func (sp *ServiceProvider) SignAuthnRequest(req *AuthnRequest) error {
	signingContext, err := GetSigningContext(sp)
	if err != nil {
		return err
	}

	assertionEl := req.Element()

	signedRequestEl, err := signingContext.SignEnveloped(assertionEl)
	if err != nil {
		return err
	}

	sigEl := signedRequestEl.Child[len(signedRequestEl.Child)-1]
	req.Signature = sigEl.(*etree.Element)
	return nil
}

// MakePostAuthenticationRequest creates a SAML authentication request using
// the HTTP-POST binding. It returns HTML text representing an HTML form that
// can be presented to a browser to initiate the login process.
func (sp *ServiceProvider) MakePostAuthenticationRequest(relayState string) ([]byte, error) {
	req, err := sp.MakeAuthenticationRequest(sp.GetSSOBindingLocation(HTTPPostBinding), HTTPPostBinding)
	if err != nil {
		return nil, err
	}
	return req.Post(relayState), nil
}

// Post returns an HTML form suitable for using the HTTP-POST binding with the request
func (req *AuthnRequest) Post(relayState string) []byte {
	doc := etree.NewDocument()
	doc.SetRoot(req.Element())
	reqBuf, err := doc.WriteToBytes()
	if err != nil {
		panic(err)
	}
	encodedReqBuf := base64.StdEncoding.EncodeToString(reqBuf)

	tmpl := template.Must(template.New("saml-post-form").Parse(`` +
		`<form method="post" action="{{.URL}}" id="SAMLRequestForm">` +
		`<input type="hidden" name="SAMLRequest" value="{{.SAMLRequest}}" />` +
		`<input type="hidden" name="RelayState" value="{{.RelayState}}" />` +
		`<input id="SAMLSubmitButton" type="submit" value="Submit" />` +
		`</form>` +
		`<script>document.getElementById('SAMLSubmitButton').style.visibility="hidden";` +
		`document.getElementById('SAMLRequestForm').submit();</script>`))
	data := struct {
		URL         string
		SAMLRequest string
		RelayState  string
	}{
		URL:         req.Destination,
		SAMLRequest: encodedReqBuf,
		RelayState:  relayState,
	}

	rv := bytes.Buffer{}
	if err := tmpl.Execute(&rv, data); err != nil {
		panic(err)
	}

	return rv.Bytes()
}

// AssertionAttributes is a list of AssertionAttribute
type AssertionAttributes []AssertionAttribute

// Get returns the assertion attribute whose Name or FriendlyName
// matches name, or nil if no matching attribute is found.
func (aa AssertionAttributes) Get(name string) *AssertionAttribute {
	for _, attr := range aa {
		if attr.Name == name {
			return &attr
		}
		if attr.FriendlyName == name {
			return &attr
		}
	}
	return nil
}

// AssertionAttribute represents an attribute of the user extracted from
// a SAML Assertion.
type AssertionAttribute struct {
	FriendlyName string
	Name         string
	Value        string
}

// InvalidResponseError is the error produced by ParseResponse when it fails.
// The underlying error is in PrivateErr. Response is the response as it was
// known at the time validation failed. Now is the time that was used to validate
// time-dependent parts of the assertion.
type InvalidResponseError struct {
	PrivateErr error
	Response   string
	Now        time.Time
}

func (ivr *InvalidResponseError) Error() string {
	return "Authentication failed"
}

// ErrBadStatus is returned when the assertion provided is valid but the
// status code is not "urn:oasis:names:tc:SAML:2.0:status:Success".
type ErrBadStatus struct {
	Status string
}

func (e ErrBadStatus) Error() string {
	return e.Status
}

func responseIsSigned(response *etree.Document) (bool, error) {
	signatureElement, err := findChild(response.Root(), "http://www.w3.org/2000/09/xmldsig#", "Signature")
	if err != nil {
		return false, err
	}
	return signatureElement != nil, nil
}

// validateDestination validates the Destination attribute.
// If the response is signed, the Destination is required to be present.
func (sp *ServiceProvider) validateDestination(response []byte, responseDom *Response) error {
	responseXML := etree.NewDocument()
	err := responseXML.ReadFromBytes(response)
	if err != nil {
		return err
	}

	signed, err := responseIsSigned(responseXML)
	if err != nil {
		return err
	}

	// Compare if the response is signed OR the Destination is provided.
	// (Even if the response is not signed, if the Destination is set it must match.)
	if signed || responseDom.Destination != "" {
		if responseDom.Destination != sp.AcsURL.String() {
			return fmt.Errorf("`Destination` does not match AcsURL (expected %q, actual %q)", sp.AcsURL.String(), responseDom.Destination)
		}
	}

	return nil
}

// ParseResponse extracts the SAML IDP response received in req, validates
// it, and returns the verified assertion.
func (sp *ServiceProvider) ParseResponse(req *http.Request, possibleRequestIDs []string) (*Assertion, error) {
	now := TimeNow()
	retErr := &InvalidResponseError{
		Now:      now,
		Response: req.PostForm.Get("SAMLResponse"),
	}

	rawResponseBuf, err := base64.StdEncoding.DecodeString(req.PostForm.Get("SAMLResponse"))
	if err != nil {
		retErr.PrivateErr = fmt.Errorf("cannot parse base64: %s", err)
		return nil, retErr
	}
	retErr.Response = string(rawResponseBuf)

	assertion, err := sp.ParseXMLResponse(rawResponseBuf, possibleRequestIDs)
	if err != nil {
		return nil, err
	}
	return assertion, nil
}

// ParseXMLResponse validates the SAML IDP response and
// returns the verified assertion.
//
// This function handles decrypting the message, verifying the digital
// signature on the assertion, and verifying that the specified conditions
// and properties are met.
//
// If the function fails it will return an InvalidResponseError whose
// properties are useful in describing which part of the parsing process
// failed. However, to discourage inadvertent disclosure of the diagnostic
// information, the Error() method returns a static string.
func (sp *ServiceProvider) ParseXMLResponse(decodedResponseXML []byte, possibleRequestIDs []string) (*Assertion, error) {
	now := TimeNow()
	var err error
	retErr := &InvalidResponseError{
		Now:      now,
		Response: string(decodedResponseXML),
	}

	// ensure that the response XML is well formed before we parse it
	if err := xrv.Validate(bytes.NewReader(decodedResponseXML)); err != nil {
		retErr.PrivateErr = fmt.Errorf("invalid xml: %s", err)
		return nil, retErr
	}

	// do some validation first before we decrypt
	resp := Response{}
	if err := xml.Unmarshal(decodedResponseXML, &resp); err != nil {
		retErr.PrivateErr = fmt.Errorf("cannot unmarshal response: %s", err)
		return nil, retErr
	}

	if err := sp.validateDestination(decodedResponseXML, &resp); err != nil {
		retErr.PrivateErr = err
		return nil, retErr
	}

	requestIDvalid := false
	if sp.AllowIDPInitiated {
		requestIDvalid = true
	} else {
		for _, possibleRequestID := range possibleRequestIDs {
			if resp.InResponseTo == possibleRequestID {
				requestIDvalid = true
			}
		}
	}
	if !requestIDvalid {
		retErr.PrivateErr = fmt.Errorf("`InResponseTo` does not match any of the possible request IDs (expected %v)", possibleRequestIDs)
		return nil, retErr
	}

	if resp.IssueInstant.Add(MaxIssueDelay).Before(now) {
		retErr.PrivateErr = fmt.Errorf("response IssueInstant expired at %s", resp.IssueInstant.Add(MaxIssueDelay))
		return nil, retErr
	}
	if resp.Issuer != nil && resp.Issuer.Value != sp.IDPMetadata.EntityID {
		retErr.PrivateErr = fmt.Errorf("response Issuer does not match the IDP metadata (expected %q)", sp.IDPMetadata.EntityID)
		return nil, retErr
	}
	if resp.Status.StatusCode.Value != StatusSuccess {
		retErr.PrivateErr = ErrBadStatus{Status: resp.Status.StatusCode.Value}
		return nil, retErr
	}

	var assertion *Assertion
	if resp.EncryptedAssertion == nil {
		doc := etree.NewDocument()
		if err := doc.ReadFromBytes(decodedResponseXML); err != nil {
			retErr.PrivateErr = err
			return nil, retErr
		}

		// TODO(ross): verify that the namespace is urn:oasis:names:tc:SAML:2.0:protocol
		responseEl := doc.Root()
		if responseEl.Tag != "Response" {
			retErr.PrivateErr = fmt.Errorf("expected to find a response object, not %s", doc.Root().Tag)
			return nil, retErr
		}

		if err = sp.validateSigned(responseEl); err != nil {
			retErr.PrivateErr = err
			return nil, retErr
		}

		assertion = resp.Assertion
	}

	// decrypt the response
	if resp.EncryptedAssertion != nil {
		doc := etree.NewDocument()
		if err := doc.ReadFromBytes(decodedResponseXML); err != nil {
			retErr.PrivateErr = err
			return nil, retErr
		}

		// The encrypted assertion is covered by the response signature, so
		// verify the response signature (if present) before decrypting.
		responseSigned, err := responseIsSigned(doc)
		if err != nil {
			retErr.PrivateErr = err
			return nil, retErr
		}
		if responseSigned {
			if err := sp.validateSigned(doc.Root()); err != nil {
				retErr.PrivateErr = err
				return nil, retErr
			}
		}

		var key interface{} = sp.Key
		keyEl := doc.FindElement("//EncryptedAssertion/EncryptedKey")
		if keyEl != nil {
			key, err = xmlenc.Decrypt(sp.Key, keyEl)
			if err != nil {
				retErr.PrivateErr = fmt.Errorf("failed to decrypt key from response: %s", err)
				return nil, retErr
			}
		}

		el := doc.FindElement("//EncryptedAssertion/EncryptedData")
		plaintextAssertion, err := xmlenc.Decrypt(key, el)
		if err != nil {
			retErr.PrivateErr = fmt.Errorf("failed to decrypt response: %s", err)
			return nil, retErr
		}
		retErr.Response = string(plaintextAssertion)

		// TODO(ross): add test case for this
		if err := xrv.Validate(bytes.NewReader(plaintextAssertion)); err != nil {
			retErr.PrivateErr = fmt.Errorf("plaintext response contains invalid XML: %s", err)
			return nil, retErr
		}

		doc = etree.NewDocument()
		if err := doc.ReadFromBytes(plaintextAssertion); err != nil {
			retErr.PrivateErr = fmt.Errorf("cannot parse plaintext response: %v", err)
			return nil, retErr
		}

		// If the response was not signed, the decrypted assertion itself must
		// carry a valid signature; otherwise the signed response is sufficient.
		if err := sp.validateSigned(doc.Root()); err != nil && !responseSigned {
			retErr.PrivateErr = err
			return nil, retErr
		}

		assertion = &Assertion{}
		// Note: plaintextAssertion is known to be safe to parse because
		// plaintextAssertion is unmodified from when xrv.Validate() was called above.
		if err := xml.Unmarshal(plaintextAssertion, assertion); err != nil {
			retErr.PrivateErr = err
			return nil, retErr
		}
	}

	if err := sp.validateAssertion(assertion, possibleRequestIDs, now); err != nil {
		retErr.PrivateErr = fmt.Errorf("assertion invalid: %s", err)
		return nil, retErr
	}

	return assertion, nil
}

// validateAssertion checks that the conditions specified in assertion match
// the requirements to accept. If validation fails, it returns an error describing
// the failure. (The digital signature on the assertion is not checked -- this
// should be done before calling this function).
func (sp *ServiceProvider) validateAssertion(assertion *Assertion, possibleRequestIDs []string, now time.Time) error {
	if assertion.IssueInstant.Add(MaxIssueDelay).Before(now) {
		return fmt.Errorf("expired on %s", assertion.IssueInstant.Add(MaxIssueDelay))
	}
	if assertion.Issuer.Value != sp.IDPMetadata.EntityID {
		return fmt.Errorf("issuer is not %q", sp.IDPMetadata.EntityID)
	}
	for _, subjectConfirmation := range assertion.Subject.SubjectConfirmations {
		requestIDvalid := false

		// We *DO NOT* validate InResponseTo when AllowIDPInitiated is set. Here's why:
		//
		// The SAML specification does not provide clear guidance for handling InResponseTo for IDP-initiated
		// requests where there is no request to be in response to. The specification says:
		//
		//   InResponseTo [Optional]
		//       The ID of a SAML protocol message in response to which an attesting entity can present the
		//       assertion. For example, this attribute might be used to correlate the assertion to a SAML
		//       request that resulted in its presentation.
		//
		// The initial thought was that we should specify a single empty string in possibleRequestIDs for IDP-initiated
		// requests so that we would ensure that an InResponseTo was *not* provided in those cases where it wasn't
		// expected. Even that turns out to be frustrating for users. And in practice some IDPs (e.g. Rippling)
		// set a specific non-empty value for InResponseTo in IDP-initiated requests.
		//
		// Finally, it is unclear that there is significant security value in checking InResponseTo when we allow
		// IDP initiated assertions.
		if !sp.AllowIDPInitiated {
			for _, possibleRequestID := range possibleRequestIDs {
				if subjectConfirmation.SubjectConfirmationData.InResponseTo == possibleRequestID {
					requestIDvalid = true
					break
				}
			}
			if !requestIDvalid {
				return fmt.Errorf("assertion SubjectConfirmation InResponseTo does not match any of the possible request IDs (%v)", possibleRequestIDs)
			}
		}
		if subjectConfirmation.SubjectConfirmationData.Recipient != sp.AcsURL.String() {
			return fmt.Errorf("assertion SubjectConfirmation Recipient is not %s", sp.AcsURL.String())
		}
		if subjectConfirmation.SubjectConfirmationData.NotOnOrAfter.Add(MaxClockSkew).Before(now) {
			return fmt.Errorf("assertion SubjectConfirmationData is expired")
		}
	}
	if assertion.Conditions.NotBefore.Add(-MaxClockSkew).After(now) {
		return fmt.Errorf("assertion Conditions is not yet valid")
	}
	if assertion.Conditions.NotOnOrAfter.Add(MaxClockSkew).Before(now) {
		return fmt.Errorf("assertion Conditions is expired")
	}

	audienceRestrictionsValid := len(assertion.Conditions.AudienceRestrictions) == 0
	audience := firstSet(sp.EntityID, sp.MetadataURL.String())
	for _, audienceRestriction := range assertion.Conditions.AudienceRestrictions {
		if audienceRestriction.Audience.Value == audience {
			audienceRestrictionsValid = true
		}
	}
	if !audienceRestrictionsValid {
		return fmt.Errorf("assertion Conditions AudienceRestriction does not contain %q", audience)
	}
	return nil
}

func findChild(parentEl *etree.Element, childNS string, childTag string) (*etree.Element, error) {
	for _, childEl := range parentEl.ChildElements() {
		if childEl.Tag != childTag {
			continue
		}

		ctx, err := etreeutils.NSBuildParentContext(childEl)
		if err != nil {
			return nil, err
		}
		ctx, err = ctx.SubContext(childEl)
		if err != nil {
			return nil, err
		}

		ns, err := ctx.LookupPrefix(childEl.Space)
		if err != nil {
			return nil, fmt.Errorf("[%s]:%s cannot find prefix %s: %v", childNS, childTag, childEl.Space, err)
		}
		if ns != childNS {
			continue
		}

		return childEl, nil
	}
	return nil, nil
}

// validateSigned returns a nil error iff each of the signatures on the Response and Assertion elements
// are valid and there is at least one signature.
func (sp *ServiceProvider) validateSigned(responseEl *etree.Element) error {
	haveSignature := false

	// Some SAML responses have the signature on the Response object, and some on the Assertion
	// object, and some on both.
	// We will require that at least one signature be present and that
	// all signatures be valid.
	sigEl, err := findChild(responseEl, "http://www.w3.org/2000/09/xmldsig#", "Signature")
	if err != nil {
		return err
	}
	if sigEl != nil {
		if err = sp.validateSignature(responseEl); err != nil {
			return fmt.Errorf("cannot validate signature on Response: %v", err)
		}
		haveSignature = true
	}

	assertionEl, err := findChild(responseEl, "urn:oasis:names:tc:SAML:2.0:assertion", "Assertion")
	if err != nil {
		return err
	}
	if assertionEl != nil {
		sigEl, err := findChild(assertionEl, "http://www.w3.org/2000/09/xmldsig#", "Signature")
		if err != nil {
			return err
		}
		if sigEl != nil {
			if err = sp.validateSignature(assertionEl); err != nil {
				return fmt.Errorf("cannot validate signature on Assertion: %v", err)
			}
			haveSignature = true
		}
	}

	if !haveSignature {
		return errors.New("either the Response or Assertion must be signed")
	}
	return nil
}

// validateSignature returns a nil error iff the Signature embedded in the element is valid
func (sp *ServiceProvider) validateSignature(el *etree.Element) error {
	certs, err := sp.getIDPSigningCerts()
	if err != nil {
		return err
	}

	certificateStore := dsig.MemoryX509CertificateStore{
		Roots: certs,
	}

	validationContext := dsig.NewDefaultValidationContext(&certificateStore)
	validationContext.IdAttribute = "ID"
	if Clock != nil {
		validationContext.Clock = Clock
	}

	// Some SAML responses contain a RSAKeyValue element. One of two things is happening here:
	//
	// (1) We're getting something signed by a key we already know about -- the public key
	//     of the signing cert provided in the metadata.
	// (2) We're getting something signed by a key we *don't* know about, and which we have
	//     no ability to verify.
	//
	// The best course of action is to just remove the KeyInfo so that dsig falls back to
	// verifying against the public key provided in the metadata.
	if el.FindElement("./Signature/KeyInfo/X509Data/X509Certificate") == nil {
		if sigEl := el.FindElement("./Signature"); sigEl != nil {
			if keyInfo := sigEl.FindElement("KeyInfo"); keyInfo != nil {
				sigEl.RemoveChild(keyInfo)
			}
		}
	}

	ctx, err := etreeutils.NSBuildParentContext(el)
	if err != nil {
		return err
	}
	ctx, err = ctx.SubContext(el)
	if err != nil {
		return err
	}
	el, err = etreeutils.NSDetatch(ctx, el)
	if err != nil {
		return err
	}

	if sp.SignatureVerifier != nil {
		return sp.SignatureVerifier.VerifySignature(validationContext, el)
	}

	_, err = validationContext.Validate(el)
	return err
}

// SignLogoutRequest adds the `Signature` element to the `LogoutRequest`.
func (sp *ServiceProvider) SignLogoutRequest(req *LogoutRequest) error {
	// The signing-context setup is identical to SignAuthnRequest, so reuse
	// GetSigningContext rather than duplicating the key-store configuration.
	signingContext, err := GetSigningContext(sp)
	if err != nil {
		return err
	}

	assertionEl := req.Element()

	signedRequestEl, err := signingContext.SignEnveloped(assertionEl)
	if err != nil {
		return err
	}

	sigEl := signedRequestEl.Child[len(signedRequestEl.Child)-1]
	req.Signature = sigEl.(*etree.Element)
	return nil
}

// MakeLogoutRequest produces a new LogoutRequest object for idpURL.
func (sp *ServiceProvider) MakeLogoutRequest(idpURL, nameID string) (*LogoutRequest, error) {
	req := LogoutRequest{
		ID:           fmt.Sprintf("id-%x", randomBytes(20)),
		IssueInstant: TimeNow(),
		Version:      "2.0",
		Destination:  idpURL,
		Issuer: &Issuer{
			Format: "urn:oasis:names:tc:SAML:2.0:nameid-format:entity",
			Value:  firstSet(sp.EntityID, sp.MetadataURL.String()),
		},
		NameID: &NameID{
			Format:          sp.nameIDFormat(),
			Value:           nameID,
			NameQualifier:   sp.IDPMetadata.EntityID,
			SPNameQualifier: sp.Metadata().EntityID,
		},
	}
		if err := sp.SignLogoutRequest(&req); err != nil {
			return nil, err
		}
	}
	return &req, nil
}

// MakeRedirectLogoutRequest creates a SAML logout request using
// the HTTP-Redirect binding. It returns a URL that we will redirect the user to
// in order to start the logout process.
func (sp *ServiceProvider) MakeRedirectLogoutRequest(nameID, relayState string) (*url.URL, error) {
	req, err := sp.MakeLogoutRequest(sp.GetSLOBindingLocation(HTTPRedirectBinding), nameID)
	if err != nil {
		return nil, err
	}
	return req.Redirect(relayState), nil
}

// Redirect returns a URL suitable for using the redirect binding with the request
func (req *LogoutRequest) Redirect(relayState string) *url.URL {
	w := &bytes.Buffer{}
	w1 := base64.NewEncoder(base64.StdEncoding, w)
	w2, _ := flate.NewWriter(w1, 9)
	doc := etree.NewDocument()
	doc.SetRoot(req.Element())
	if _, err := doc.WriteTo(w2); err != nil {
		panic(err)
	}
	w2.Close()
	w1.Close()

	rv, _ := url.Parse(req.Destination)

	query := rv.Query()
	query.Set("SAMLRequest", string(w.Bytes()))
	if relayState != "" {
		query.Set("RelayState", relayState)
	}
	rv.RawQuery = query.Encode()

	return rv
}

// MakePostLogoutRequest creates a SAML logout request using
// the HTTP-POST binding. It returns HTML text representing an HTML form that
// can be presented to a browser to initiate the logout process.
func (sp *ServiceProvider) MakePostLogoutRequest(nameID, relayState string) ([]byte, error) {
	req, err := sp.MakeLogoutRequest(sp.GetSLOBindingLocation(HTTPPostBinding), nameID)
	if err != nil {
		return nil, err
	}
	return req.Post(relayState), nil
}

// Post returns an HTML form suitable for using the HTTP-POST binding with the request
func (req *LogoutRequest) Post(relayState string) []byte {
	doc := etree.NewDocument()
	doc.SetRoot(req.Element())
	reqBuf, err := doc.WriteToBytes()
	if err != nil {
		panic(err)
	}
	encodedReqBuf := base64.StdEncoding.EncodeToString(reqBuf)

	tmpl := template.Must(template.New("saml-post-form").Parse(`` +
		`<form method="post" action="{{.URL}}" id="SAMLRequestForm">` +
		`<input type="hidden" name="SAMLRequest" value="{{.SAMLRequest}}" />` +
		`<input type="hidden" name="RelayState" value="{{.RelayState}}" />` +
		`<input id="SAMLSubmitButton" type="submit" value="Submit" />` +
		`</form>` +
		`<script>document.getElementById('SAMLSubmitButton').style.visibility="hidden";` +
		`document.getElementById('SAMLRequestForm').submit();</script>`))
	data := struct {
		URL         string
		SAMLRequest string
		RelayState  string
	}{
		URL:         req.Destination,
		SAMLRequest: encodedReqBuf,
		RelayState:  relayState,
	}

	rv := bytes.Buffer{}
	if err := tmpl.Execute(&rv, data); err != nil {
		panic(err)
	}

	return rv.Bytes()
}

// MakeLogoutResponse produces a new LogoutResponse object for idpURL and logoutRequestID.
func (sp *ServiceProvider) MakeLogoutResponse(idpURL, logoutRequestID string) (*LogoutResponse, error) {
	response := LogoutResponse{
		ID:           fmt.Sprintf("id-%x", randomBytes(20)),
		InResponseTo: logoutRequestID,
		Version:      "2.0",
		IssueInstant: TimeNow(),
		Destination:  idpURL,
		Issuer: &Issuer{
			Format: "urn:oasis:names:tc:SAML:2.0:nameid-format:entity",
			Value:  firstSet(sp.EntityID, sp.MetadataURL.String()),
		},
		Status: Status{
			StatusCode: StatusCode{
				Value: StatusSuccess,
			},
		},
	}

	if len(sp.SignatureMethod) > 0 {
		if err := sp.SignLogoutResponse(&response); err != nil {
			return nil, err
		}
	}
	return &response, nil
}

// MakeRedirectLogoutResponse creates a SAML LogoutResponse using
// the HTTP-Redirect binding. It returns a URL that we will redirect the user to
// for LogoutResponse.
func (sp *ServiceProvider) MakeRedirectLogoutResponse(logoutRequestID, relayState string) (*url.URL, error) {
	resp, err := sp.MakeLogoutResponse(sp.GetSLOBindingLocation(HTTPRedirectBinding), logoutRequestID)
	if err != nil {
		return nil, err
	}
	return resp.Redirect(relayState), nil
}

// Redirect returns a URL suitable for using the redirect binding with the LogoutResponse.
func (resp *LogoutResponse) Redirect(relayState string) *url.URL {
	w := &bytes.Buffer{}
	w1 := base64.NewEncoder(base64.StdEncoding, w)
	w2, _ := flate.NewWriter(w1, 9)
	doc := etree.NewDocument()
	doc.SetRoot(resp.Element())
	if _, err := doc.WriteTo(w2); err != nil {
		panic(err)
	}
	w2.Close()
	w1.Close()

	rv, _ := url.Parse(resp.Destination)

	query := rv.Query()
	query.Set("SAMLResponse", string(w.Bytes()))
	if relayState != "" {
		query.Set("RelayState", relayState)
	}
	rv.RawQuery = query.Encode()

	return rv
}

// MakePostLogoutResponse creates a SAML LogoutResponse using
// the HTTP-POST binding. It returns HTML text representing an HTML form that
// can be presented to a browser for LogoutResponse.
func (sp *ServiceProvider) MakePostLogoutResponse(logoutRequestID, relayState string) ([]byte, error) {
	resp, err := sp.MakeLogoutResponse(sp.GetSLOBindingLocation(HTTPPostBinding), logoutRequestID)
	if err != nil {
		return nil, err
	}
	return resp.Post(relayState), nil
}

// Post returns an HTML form suitable for using the HTTP-POST binding with the LogoutResponse.
func (resp *LogoutResponse) Post(relayState string) []byte {
	doc := etree.NewDocument()
	doc.SetRoot(resp.Element())
	reqBuf, err := doc.WriteToBytes()
	if err != nil {
		panic(err)
	}
	encodedReqBuf := base64.StdEncoding.EncodeToString(reqBuf)

	tmpl := template.Must(template.New("saml-post-form").Parse(`` +
		`<form method="post" action="{{.URL}}" id="SAMLResponseForm">` +
		`<input type="hidden" name="SAMLResponse" value="{{.SAMLResponse}}" />` +
		`<input type="hidden" name="RelayState" value="{{.RelayState}}" />` +
		`<input id="SAMLSubmitButton" type="submit" value="Submit" />` +
		`</form>` +
		`<script>document.getElementById('SAMLSubmitButton').style.visibility="hidden";` +
		`document.getElementById('SAMLResponseForm').submit();</script>`))
	data := struct {
		URL          string
		SAMLResponse string
		RelayState   string
	}{
		URL:          resp.Destination,
		SAMLResponse: encodedReqBuf,
		RelayState:   relayState,
	}

	rv := bytes.Buffer{}
	if err := tmpl.Execute(&rv, data); err != nil {
		panic(err)
	}

	return rv.Bytes()
}

// SignLogoutResponse adds the `Signature` element to the `LogoutResponse`.
func (sp *ServiceProvider) SignLogoutResponse(resp *LogoutResponse) error {
	// As with SignLogoutRequest, reuse GetSigningContext instead of
	// duplicating the key-store and signature-method setup.
	signingContext, err := GetSigningContext(sp)
	if err != nil {
		return err
	}

	assertionEl := resp.Element()

	signedRequestEl, err := signingContext.SignEnveloped(assertionEl)
	if err != nil {
		return err
	}

	sigEl := signedRequestEl.Child[len(signedRequestEl.Child)-1]
	resp.Signature = sigEl.(*etree.Element)
	return nil
}

func (sp *ServiceProvider) nameIDFormat() string {
	var nameIDFormat string
	switch sp.AuthnNameIDFormat {
	case "":
		// To maintain library back-compat, use "transient" if unset.
		nameIDFormat = string(TransientNameIDFormat)
	case UnspecifiedNameIDFormat:
		// Spec defines an empty value as "unspecified" so don't set one.
	default:
		nameIDFormat = string(sp.AuthnNameIDFormat)
	}
	return nameIDFormat
}

// ValidateLogoutResponseRequest validates the LogoutResponse content from the request
func (sp *ServiceProvider) ValidateLogoutResponseRequest(req *http.Request) error {
	if data := req.URL.Query().Get("SAMLResponse"); data != "" {
		return sp.ValidateLogoutResponseRedirect(data)
	}

	err := req.ParseForm()
	if err != nil {
		return fmt.Errorf("unable to parse form: %v", err)
	}

	return sp.ValidateLogoutResponseForm(req.PostForm.Get("SAMLResponse"))
}

// ValidateLogoutResponseForm returns a nil error if the logout response is valid.
func (sp *ServiceProvider) ValidateLogoutResponseForm(postFormData string) error {
	rawResponseBuf, err := base64.StdEncoding.DecodeString(postFormData)
	if err != nil {
		return fmt.Errorf("unable to parse base64: %s", err)
	}

	// TODO(ross): add test case for this (SLO does not have tests right now)
	if err := xrv.Validate(bytes.NewReader(rawResponseBuf)); err != nil {
		return fmt.Errorf("response contains invalid XML: %s", err)
	}

	var resp LogoutResponse
	if err := xml.Unmarshal(rawResponseBuf, &resp); err != nil {
		return fmt.Errorf("cannot unmarshal response: %s", err)
	}

	if err := sp.validateLogoutResponse(&resp); err != nil {
		return err
	}

	doc := etree.NewDocument()
	if err := doc.ReadFromBytes(rawResponseBuf); err != nil {
		return err
	}

	responseEl := doc.Root()
	if err = sp.validateSigned(responseEl); err != nil {
		return err
	}

	return nil
}

// ValidateLogoutResponseRedirect returns a nil error if the logout response is valid.
//
// URL Binding appears to be gzip / flate encoded
// See https://www.oasis-open.org/committees/download.php/20645/sstc-saml-tech-overview-2%200-draft-10.pdf section 6.6
func (sp *ServiceProvider) ValidateLogoutResponseRedirect(queryParameterData string) error {
	rawResponseBuf, err := base64.StdEncoding.DecodeString(queryParameterData)
	if err != nil {
		return fmt.Errorf("unable to parse base64: %s", err)
	}

	gr, err := ioutil.ReadAll(flate.NewReader(bytes.NewBuffer(rawResponseBuf)))
	if err != nil {
		return fmt.Errorf("unable to flate decode: %s", err)
	}

	if err := xrv.Validate(bytes.NewReader(gr)); err != nil {
		return err
	}

	decoder := xml.NewDecoder(bytes.NewReader(gr))
	var resp LogoutResponse
	err = decoder.Decode(&resp)
	if err != nil {
		return fmt.Errorf("unable to unmarshal response: %s", err)
	}

	if err := sp.validateLogoutResponse(&resp); err != nil {
		return err
	}

	doc := etree.NewDocument()
	if _, err := doc.ReadFrom(bytes.NewReader(gr)); err != nil {
		return err
	}

	responseEl := doc.Root()
	if err = sp.validateSigned(responseEl); err != nil {
		return err
	}

	return nil
}

// validateLogoutResponse validates the LogoutResponse fields. Returns a nil error if the LogoutResponse is valid.
func (sp *ServiceProvider) validateLogoutResponse(resp *LogoutResponse) error {
	if resp.Destination != sp.SloURL.String() {
		return fmt.Errorf("`Destination` does not match SloURL (expected %q)", sp.SloURL.String())
	}

	now := time.Now()
	if resp.IssueInstant.Add(MaxIssueDelay).Before(now) {
		return fmt.Errorf("issueInstant expired at %s", resp.IssueInstant.Add(MaxIssueDelay))
	}
	if resp.Issuer.Value != sp.IDPMetadata.EntityID {
		return fmt.Errorf("issuer does not match the IDP metadata (expected %q)", sp.IDPMetadata.EntityID)
	}
	if resp.Status.StatusCode.Value != StatusSuccess {
		return fmt.Errorf("status code was not %s", StatusSuccess)
	}

	return nil
}

func firstSet(a, b string) string {
	if a == "" {
		return b
	}
	return a
}
if len(sp.SignatureMethod) > 0 {
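The service-provider code above contains all the pieces of a complete SP-initiated login flow: build a redirect URL to the IDP, then validate the POSTed response at the ACS endpoint. What follows is a minimal usage sketch, not part of the file itself. It assumes the code compiles as a package importable as "github.com/crewjam/saml" (whose shape it closely matches), that Key is an *rsa.PrivateKey and Certificate an *x509.Certificate, and that IDP metadata and per-session request-ID tracking are handled elsewhere; every file name, URL, and the pendingRequestIDs variable here is a placeholder.

package main

import (
	"crypto/rsa"
	"crypto/tls"
	"crypto/x509"
	"net/http"
	"net/url"

	"github.com/crewjam/saml" // assumed import path for the code above
)

func main() {
	// Placeholder key pair on disk; any PEM-encoded RSA cert/key would do.
	keyPair, err := tls.LoadX509KeyPair("sp-cert.pem", "sp-key.pem")
	if err != nil {
		panic(err)
	}
	keyPair.Leaf, _ = x509.ParseCertificate(keyPair.Certificate[0])

	root, _ := url.Parse("https://sp.example.com")
	sp := saml.ServiceProvider{
		Key:         keyPair.PrivateKey.(*rsa.PrivateKey),
		Certificate: keyPair.Leaf,
		MetadataURL: *root.ResolveReference(&url.URL{Path: "/saml/metadata"}),
		AcsURL:      *root.ResolveReference(&url.URL{Path: "/saml/acs"}),
		// IDPMetadata would be fetched from the IDP and unmarshalled elsewhere.
	}

	var pendingRequestIDs []string // the issued AuthnRequest IDs, tracked per session in a real app

	// Start login: build the HTTP-Redirect binding URL and send the browser there.
	http.HandleFunc("/login", func(w http.ResponseWriter, r *http.Request) {
		redirectURL, err := sp.MakeRedirectAuthenticationRequest("relay-state")
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		http.Redirect(w, r, redirectURL.String(), http.StatusFound)
	})

	// ACS endpoint: ParseResponse reads req.PostForm, so parse the form first.
	http.HandleFunc("/saml/acs", func(w http.ResponseWriter, r *http.Request) {
		if err := r.ParseForm(); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		assertion, err := sp.ParseResponse(r, pendingRequestIDs)
		if err != nil {
			http.Error(w, "Authentication failed", http.StatusForbidden)
			return
		}
		_ = assertion // establish the application session from the verified assertion here
	})

	_ = http.ListenAndServe(":8080", nil)
}

Setting SignatureMethod to one of the dsig RSA-SHA methods accepted by GetSigningContext would additionally sign the request; with the redirect binding the signature goes on the query string, and with the POST binding on the XML document.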
ower.rs
#[doc = "Register `OWER` writer"] pub struct W(crate::W<OWER_SPEC>); impl core::ops::Deref for W { type Target = crate::W<OWER_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl core::convert::From<crate::W<OWER_SPEC>> for W { fn from(writer: crate::W<OWER_SPEC>) -> Self { W(writer) } } #[doc = "Field `P0` writer - Output Write Enable"] pub struct P0_W<'a> { w: &'a mut W, } impl<'a> P0_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !0x01) | (value as u32 & 0x01); self.w } } #[doc = "Field `P1` writer - Output Write Enable"] pub struct P1_W<'a> { w: &'a mut W, } impl<'a> P1_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 1)) | ((value as u32 & 0x01) << 1); self.w } } #[doc = "Field `P2` writer - Output Write Enable"] pub struct P2_W<'a> { w: &'a mut W, } impl<'a> P2_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 2)) | ((value as u32 & 0x01) << 2); self.w } } #[doc = "Field `P3` writer - Output Write Enable"] pub struct P3_W<'a> { w: &'a mut W, } impl<'a> P3_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 3)) | ((value as u32 & 0x01) << 3); self.w } } #[doc = "Field `P4` writer - Output Write Enable"] pub struct P4_W<'a> { w: &'a mut W, } impl<'a> P4_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 4)) | ((value as u32 & 0x01) << 4); self.w } } #[doc = "Field `P5` writer - Output Write Enable"] pub struct P5_W<'a> { w: &'a mut W, } impl<'a> P5_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 5)) | ((value as u32 & 0x01) << 5); self.w } } #[doc = "Field `P6` writer - Output Write Enable"] pub struct P6_W<'a> { w: &'a mut W, } 
impl<'a> P6_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 6)) | ((value as u32 & 0x01) << 6); self.w } } #[doc = "Field `P7` writer - Output Write Enable"] pub struct P7_W<'a> { w: &'a mut W, } impl<'a> P7_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 7)) | ((value as u32 & 0x01) << 7); self.w } } #[doc = "Field `P8` writer - Output Write Enable"] pub struct P8_W<'a> { w: &'a mut W, } impl<'a> P8_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 8)) | ((value as u32 & 0x01) << 8); self.w } } #[doc = "Field `P9` writer - Output Write Enable"] pub struct P9_W<'a> { w: &'a mut W, } impl<'a> P9_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 9)) | ((value as u32 & 0x01) << 9); self.w } } #[doc = "Field `P10` writer - Output Write Enable"] pub struct P10_W<'a> { w: &'a mut W, } impl<'a> P10_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 10)) | ((value as u32 & 0x01) << 10); self.w } } #[doc = "Field `P11` writer - Output Write Enable"] pub struct P11_W<'a> { w: &'a mut W, } impl<'a> P11_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 11)) | ((value as u32 & 0x01) << 11); self.w } } #[doc = "Field `P12` writer - Output Write Enable"] pub struct P12_W<'a> { w: &'a mut W, } impl<'a> P12_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 12)) | ((value as u32 & 0x01) << 12); self.w } } #[doc = "Field `P13` writer - Output Write Enable"] pub struct P13_W<'a> 
{ w: &'a mut W, } impl<'a> P13_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 13)) | ((value as u32 & 0x01) << 13); self.w } } #[doc = "Field `P14` writer - Output Write Enable"] pub struct P14_W<'a> { w: &'a mut W, } impl<'a> P14_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 14)) | ((value as u32 & 0x01) << 14); self.w } } #[doc = "Field `P15` writer - Output Write Enable"] pub struct P15_W<'a> { w: &'a mut W, } impl<'a> P15_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 15)) | ((value as u32 & 0x01) << 15); self.w } } #[doc = "Field `P16` writer - Output Write Enable"] pub struct P16_W<'a> { w: &'a mut W, } impl<'a> P16_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 16)) | ((value as u32 & 0x01) << 16); self.w } } #[doc = "Field `P17` writer - Output Write Enable"] pub struct P17_W<'a> { w: &'a mut W, } impl<'a> P17_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 17)) | ((value as u32 & 0x01) << 17); self.w } } #[doc = "Field `P18` writer - Output Write Enable"] pub struct P18_W<'a> { w: &'a mut W, } impl<'a> P18_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 18)) | ((value as u32 & 0x01) << 18); self.w } } #[doc = "Field `P19` writer - Output Write Enable"] pub struct P19_W<'a> { w: &'a mut W, } impl<'a> P19_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 19)) | ((value as u32 & 0x01) << 19); self.w } } #[doc = "Field `P20` writer - Output 
Write Enable"] pub struct P20_W<'a> { w: &'a mut W, } impl<'a> P20_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 20)) | ((value as u32 & 0x01) << 20); self.w } } #[doc = "Field `P21` writer - Output Write Enable"] pub struct P21_W<'a> { w: &'a mut W, } impl<'a> P21_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 21)) | ((value as u32 & 0x01) << 21); self.w } } #[doc = "Field `P22` writer - Output Write Enable"] pub struct P22_W<'a> { w: &'a mut W, } impl<'a> P22_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn
(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 22)) | ((value as u32 & 0x01) << 22); self.w } } #[doc = "Field `P23` writer - Output Write Enable"] pub struct P23_W<'a> { w: &'a mut W, } impl<'a> P23_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 23)) | ((value as u32 & 0x01) << 23); self.w } } #[doc = "Field `P24` writer - Output Write Enable"] pub struct P24_W<'a> { w: &'a mut W, } impl<'a> P24_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 24)) | ((value as u32 & 0x01) << 24); self.w } } #[doc = "Field `P25` writer - Output Write Enable"] pub struct P25_W<'a> { w: &'a mut W, } impl<'a> P25_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 25)) | ((value as u32 & 0x01) << 25); self.w } } #[doc = "Field `P26` writer - Output Write Enable"] pub struct P26_W<'a> { w: &'a mut W, } impl<'a> P26_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 26)) | ((value as u32 & 0x01) << 26); self.w } } #[doc = "Field `P27` writer - Output Write Enable"] pub struct P27_W<'a> { w: &'a mut W, } impl<'a> P27_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 27)) | ((value as u32 & 0x01) << 27); self.w } } #[doc = "Field `P28` writer - Output Write Enable"] pub struct P28_W<'a> { w: &'a mut W, } impl<'a> P28_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 28)) | ((value as u32 & 0x01) << 28); self.w } } #[doc = "Field `P29` writer - Output Write Enable"] pub struct P29_W<'a> { w: &'a mut W, } impl<'a> P29_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the 
field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 29)) | ((value as u32 & 0x01) << 29); self.w } } #[doc = "Field `P30` writer - Output Write Enable"] pub struct P30_W<'a> { w: &'a mut W, } impl<'a> P30_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 30)) | ((value as u32 & 0x01) << 30); self.w } } #[doc = "Field `P31` writer - Output Write Enable"] pub struct P31_W<'a> { w: &'a mut W, } impl<'a> P31_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 31)) | ((value as u32 & 0x01) << 31); self.w } } impl W { #[doc = "Bit 0 - Output Write Enable"] #[inline(always)] pub fn p0(&mut self) -> P0_W { P0_W { w: self } } #[doc = "Bit 1 - Output Write Enable"] #[inline(always)] pub fn p1(&mut self) -> P1_W { P1_W { w: self } } #[doc = "Bit 2 - Output Write Enable"] #[inline(always)] pub fn p2(&mut self) -> P2_W { P2_W { w: self } } #[doc = "Bit 3 - Output Write Enable"] #[inline(always)] pub fn p3(&mut self) -> P3_W { P3_W { w: self } } #[doc = "Bit 4 - Output Write Enable"] #[inline(always)] pub fn p4(&mut self) -> P4_W { P4_W { w: self } } #[doc = "Bit 5 - Output Write Enable"] #[inline(always)] pub fn p5(&mut self) -> P5_W { P5_W { w: self } } #[doc = "Bit 6 - Output Write Enable"] #[inline(always)] pub fn p6(&mut self) -> P6_W { P6_W { w: self } } #[doc = "Bit 7 - Output Write Enable"] #[inline(always)] pub fn p7(&mut self) -> P7_W { P7_W { w: self } } #[doc = "Bit 8 - Output Write Enable"] #[inline(always)] pub fn p8(&mut self) -> P8_W { P8_W { w: self } } #[doc = "Bit 9 - Output Write Enable"] #[inline(always)] pub fn p9(&mut self) -> P9_W { P9_W { w: self } } #[doc = "Bit 10 - Output Write Enable"] #[inline(always)] pub fn p10(&mut self) -> P10_W { P10_W { w: self } } #[doc = "Bit 11 - Output Write Enable"] #[inline(always)] pub fn p11(&mut self) -> P11_W { P11_W { w: self } } #[doc = "Bit 12 - Output Write Enable"] #[inline(always)] pub fn p12(&mut self) -> P12_W { P12_W { w: self } } #[doc = "Bit 13 - Output Write Enable"] #[inline(always)] pub fn p13(&mut self) -> P13_W { P13_W { w: self } } #[doc = "Bit 14 - Output Write Enable"] #[inline(always)] pub fn p14(&mut self) -> P14_W { P14_W { w: self } } #[doc = "Bit 15 - Output Write Enable"] #[inline(always)] pub fn p15(&mut self) -> P15_W { P15_W { w: self } } #[doc = "Bit 16 - Output Write Enable"] #[inline(always)] pub fn p16(&mut self) -> P16_W { P16_W { w: self } } #[doc = "Bit 17 - Output Write Enable"] #[inline(always)] pub fn p17(&mut self) -> P17_W { P17_W { w: self } } #[doc = "Bit 18 - Output Write Enable"] #[inline(always)] pub fn p18(&mut self) -> P18_W { P18_W { w: self } } #[doc = "Bit 19 - Output Write Enable"] #[inline(always)] pub fn p19(&mut self) -> P19_W { P19_W { w: self } } #[doc = "Bit 20 - Output Write Enable"] #[inline(always)] pub fn p20(&mut self) -> P20_W { P20_W { w: self } } #[doc = "Bit 21 - Output Write Enable"] #[inline(always)] pub fn 
p21(&mut self) -> P21_W { P21_W { w: self } } #[doc = "Bit 22 - Output Write Enable"] #[inline(always)] pub fn p22(&mut self) -> P22_W { P22_W { w: self } } #[doc = "Bit 23 - Output Write Enable"] #[inline(always)] pub fn p23(&mut self) -> P23_W { P23_W { w: self } } #[doc = "Bit 24 - Output Write Enable"] #[inline(always)] pub fn p24(&mut self) -> P24_W { P24_W { w: self } } #[doc = "Bit 25 - Output Write Enable"] #[inline(always)] pub fn p25(&mut self) -> P25_W { P25_W { w: self } } #[doc = "Bit 26 - Output Write Enable"] #[inline(always)] pub fn p26(&mut self) -> P26_W { P26_W { w: self } } #[doc = "Bit 27 - Output Write Enable"] #[inline(always)] pub fn p27(&mut self) -> P27_W { P27_W { w: self } } #[doc = "Bit 28 - Output Write Enable"] #[inline(always)] pub fn p28(&mut self) -> P28_W { P28_W { w: self } } #[doc = "Bit 29 - Output Write Enable"] #[inline(always)] pub fn p29(&mut self) -> P29_W { P29_W { w: self } } #[doc = "Bit 30 - Output Write Enable"] #[inline(always)] pub fn p30(&mut self) -> P30_W { P30_W { w: self } } #[doc = "Bit 31 - Output Write Enable"] #[inline(always)] pub fn p31(&mut self) -> P31_W { P31_W { w: self } } #[doc = "Writes raw bits to the register."] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "Output Write Enable\n\nThis register you can [`write_with_zero`](crate::generic::Reg::write_with_zero). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [ower](index.html) module"] pub struct OWER_SPEC; impl crate::RegisterSpec for OWER_SPEC { type Ux = u32; } #[doc = "`write(|w| ..)` method takes [ower::W](W) writer structure"] impl crate::Writable for OWER_SPEC { type Writer = W; }
bit
generics_automata.rs
use typestate::typestate;

#[typestate]
mod my_state {
    #[automaton]
    pub struct
;

    #[state]
    pub struct State1<T> {
        data: T,
    }

    trait State1 {
        fn new<T>() -> State1<T>;
        fn done(self);
    }
}

fn main() {}
MyState
stack.py
class Stack:
    #initialize stack and top
    def __init__(self,max_size=None):
        self.__stack = []
        self.__max_size = max_size
        self.__top = 0

    #current length of stack
    def __len__(self):
        return len(self.__stack)

    #check if stack is empty
    def is_empty(self):
        return True if self.__top==0 else False

    #check if stack is full
    def is_full(self):
        return True if self.__max_size and self.__max_size==self.__top else False

    #retrieve stack
    def get(self,index=None):
        if index is not None:
    def push(self,x):
        if self.is_full():
            print('Overflow')
            return None
        else:
            self.__stack.append(x)
            self.__top+=1

    #remove item from stack
    def pop(self):
        if self.is_empty():
            print('Underflow')
            return None
        else:
            self.__top-=1
            return self.__stack.pop()

    #show item on top of stack
    def peek(self):
        if self.is_empty():
            print('Empty Stack')
            return
        else:
            return self.__stack[-1]
            return self.__stack[index]
        return self.__stack

    #add item to stack
enlist.py
async def
(self, message, command, arguments): await self.run_file("section_slot_assign", message, arguments)
Main
lib.rs
//! provides function attribute macros for rust-crowbar crate
extern crate proc_macro;
#[macro_use]
extern crate quote;
extern crate syn;

use proc_macro::TokenStream;
use syn::{parse, ItemFn, ReturnType};

/// Implements the `lambdafn` attribute.
///
/// This attribute is used to export a Rust function into an
/// AWS triggerable Lambda function. In lambda you can refer to these by path with
/// `liblambda.{fn_name}`
///
/// # Examples
///
/// ```rust,ignore
/// #[macro_use] extern crate crowbar;
/// #[macro_use] extern crate cpython;
///
/// #[lambdafn]
/// fn example(
///     event: crowbar::Value,
///     ctx: crowbar::LambdaContext
/// ) -> crowbar::LambdaResult {
///     Ok(event)
/// }
/// ```
#[proc_macro_attribute]
pub fn lambdafn(args: TokenStream, input: TokenStream) -> TokenStream {
    attr_impl(args, input)
}

// implementation. should expect the following
// * verify function type
// * input args are (event, context)
// * has a return type
fn attr_impl(_: TokenStream, input: TokenStream) -> TokenStream {
    let target: ItemFn = match parse(input.clone()) {
        Ok(f) => f,
        _ => {
            panic!("the 'lambdafn' attribute can only be used on functions");
            // https://doc.rust-lang.org/proc_macro/struct.Span.html#method.error
            // use span diagnostics when this becomes stable
        }
    };
    if target.decl.inputs.len() != 2
    match target.decl.output {
        ReturnType::Default => {
            // https://doc.rust-lang.org/proc_macro/struct.Span.html#method.error
            // use span diagnostics when it becomes stable
            panic!("the 'lambdafn' attribute requires a function that returns a value. expecting {}(_: crowbar::Value, _: crowbar::LambdaContext) -> crowbar::LambdaResult", target.ident);
        },
        _ => ()
    }
    let target_ident = target.ident.clone();
    let target_name = target_ident.to_string();
    let expanded = quote! {
        #target
        lambda!(#target_name => #target_ident);
    };
    expanded.into()
}
    {
        panic!(
            "the 'lambdafn' attribute requires a function with two arguments. expecting {}(_: crowbar::Value, _: crowbar::LambdaContext) -> crowbar::LambdaResult",
            target.ident
        );
        // https://doc.rust-lang.org/proc_macro/struct.Span.html#method.error
        // use span diagnostics when it becomes stable
    }
main.go
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by cloud.google.com/go/internal/gapicgen/gensnippets. DO NOT EDIT.

// [START privateca_v1beta1_generated_CertificateAuthorityService_RevokeCertificate_sync]

package main

import (
	"context"

	privateca "cloud.google.com/go/security/privateca/apiv1beta1"
	privatecapb "google.golang.org/genproto/googleapis/cloud/security/privateca/v1beta1"
)

func main()
// [END privateca_v1beta1_generated_CertificateAuthorityService_RevokeCertificate_sync]
{
	ctx := context.Background()
	c, err := privateca.NewCertificateAuthorityClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	defer c.Close()

	req := &privatecapb.RevokeCertificateRequest{
		// TODO: Fill request struct fields.
		// See https://pkg.go.dev/google.golang.org/genproto/googleapis/cloud/security/privateca/v1beta1#RevokeCertificateRequest.
	}
	resp, err := c.RevokeCertificate(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
01-Classes.py
# Class: blueprint for creating new objects
# Object: instances of a class

# Class: Human
# Objects: John, Mary, Jack


class Point:
    def draw(self):
point = Point()
print(type(point))
print(isinstance(point, Point))
print("draw")
MainCard.js
import PropTypes from "prop-types"; import { forwardRef } from "react"; // material-ui import { useTheme } from "@mui/material/styles"; import { Card, CardContent, CardHeader, Divider, Typography } from "@mui/material"; // constant const headerSX = { "& .MuiCardHeader-action": { mr: 0 } }; // ==============================|| CUSTOM MAIN CARD ||============================== // const MainCard = forwardRef( ( { border = true, boxShadow, children, content = true, contentClass = "", contentSX = {}, darkTitle, secondary, shadow, sx = {}, title, contentProps, ...others }, ref ) => { const theme = useTheme(); return ( <Card ref={ref} {...others} sx={{ border: border ? "1px solid" : "none", borderColor: theme.palette.primary[200] + 75, ":hover": { boxShadow: boxShadow ? shadow || "0 2px 14px 0 rgb(32 40 45 / 8%)" : "inherit" }, ...sx }} > {/* card header and action */} {!darkTitle && title && <CardHeader sx={headerSX} title={title} action={secondary} />} {darkTitle && title && ( <CardHeader sx={headerSX} title={<Typography variant="h3">{title}</Typography>} action={secondary} /> )} {/* content & header divider */} {title && <Divider />} {/* card content */} {content && ( <CardContent sx={contentSX} className={contentClass} {...contentProps}> {children} </CardContent> )} {!content && children} </Card> ); } ); MainCard.propTypes = { border: PropTypes.bool, boxShadow: PropTypes.bool, children: PropTypes.node, content: PropTypes.bool, contentClass: PropTypes.string, contentSX: PropTypes.object, darkTitle: PropTypes.bool,
    secondary: PropTypes.oneOfType([PropTypes.node, PropTypes.string, PropTypes.object]),
    shadow: PropTypes.string,
    sx: PropTypes.object,
    title: PropTypes.oneOfType([PropTypes.node, PropTypes.string, PropTypes.object]),
    contentProps: PropTypes.object
};

export default MainCard;
context.py
# Copyright (c) 2014, Guillermo López-Anglada. Please see the AUTHORS file for details. # All rights reserved. Use of this source code is governed by a BSD-style # license that can be found in the LICENSE file. import sublime class ContextProviderMixin(object): '''Provides a method to evaluate contexts. Useful with sublime_plugin.EventListeners that need to evaluate contexts. ''' def _
self, value, operator, operand, match_all): if operator == sublime.OP_EQUAL: if operand == True: return value elif operand == False: return not value elif operator == sublime.OP_NOT_EQUAL: if operand == True: return not value elif operand == False: return value
check(
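# A hedged usage sketch (not part of the original plugin): an EventListener
# delegating a custom context key to the mixin's _check helper. MyListener
# and the 'my_example_context' key are hypothetical; ContextProviderMixin is
# the class defined in this module above.
import sublime_plugin

class MyListener(sublime_plugin.EventListener, ContextProviderMixin):
    def on_query_context(self, view, key, operator, operand, match_all):
        if key == 'my_example_context':
            # evaluate some boolean view state against operator/operand
            value = view.match_selector(0, 'source.python')
            return self._check(value, operator, operand, match_all)
        return None  # let other context providers handle unknown keys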
api.rs
use crate::{checks::pongo::*, *}; use rayon::prelude::*; /** Public library API for Krecik remote-checks functionality **/ /// Return checks from path, excluding remotes pub fn all_checks_but_remotes() -> Vec<Check> { list_all_checks_from(CHECKS_DIR) .par_iter() .filter_map(|check_path| { if !check_path.contains(REMOTE_CHECKS_DIR) && !check_path.contains(TESTS_DIR) { // select only valid Check, just ignore any malformed ones read_single_check(check_path) } else { None } }) .collect()
} /// Return remote domain+pages checks via mapper pub fn all_checks_pongo_merged() -> Vec<Check> { list_all_checks_from(&format!("{}/{}", CHECKS_DIR, REMOTE_CHECKS_DIR)) .into_iter() .map(|pongo_mapper| { let mapper = read_pongo_mapper(&pongo_mapper); let all_pongo_checks = get_pongo_checks(&mapper.url); let domain_checks = all_pongo_checks .clone() .into_par_iter() .flat_map(|check| collect_pongo_domains(&check)) .collect(); let pongo_checks = all_pongo_checks .into_par_iter() .flat_map(|check| collect_pongo_hosts(&check, &mapper)) .collect(); Check { pages: Some(pongo_checks), domains: Some(domain_checks), notifier: mapper.notifier, } }) .collect() }
nn.py
""" A NeuralNet is just a collection of layers. It behaves a lot like a layer itself, although we're not going to make it one. """ from typing import Sequence, Iterator, Tuple from .tensor import Tensor from .layers import Layer class NeuralNet: def __init__(self, layers: Sequence[Layer]) -> None: self.layers = layers def forward(self, inputs: Tensor) -> Tensor: for layer in self.layers: inputs = layer.forward(inputs) return inputs def
(self, grad: Tensor) -> Tensor: for layer in reversed(self.layers): grad = layer.backward(grad) return grad def params_and_grads(self) -> Iterator[Tuple[Tensor, Tensor]]: for layer in self.layers: for name, param in layer.params.items(): grad = layer.grads[name] yield param, grad
backward
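# A minimal usage sketch of the NeuralNet class above. The Linear and Tanh
# layers (and the `nn`/`layers` import paths) are assumptions: they are
# taken to be Layer subclasses from the sibling layers module, not shown here.
import numpy as np
from nn import NeuralNet          # assumed import path for the class above
from layers import Linear, Tanh   # assumed Layer subclasses with params/grads

net = NeuralNet([
    Linear(input_size=2, output_size=4),
    Tanh(),
    Linear(input_size=4, output_size=1),
])

inputs = np.random.randn(8, 2)   # a batch of 8 two-feature examples
targets = np.random.randn(8, 1)

outputs = net.forward(inputs)               # forward through every layer
net.backward(outputs - targets)             # push a loss gradient back
for param, grad in net.params_and_grads():  # vanilla SGD step
    param -= 0.01 * grad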
resume.min.js
/*! * Start Bootstrap - Resume v5.0.8 (https://startbootstrap.com/template-overviews/resume) * Copyright 2013-2022 Start Bootstrap
* Licensed under MIT (https://github.com/BlackrockDigital/startbootstrap-resume/blob/master/LICENSE) */ !function(e){"use strict";e('a.js-scroll-trigger[href*="#"]:not([href="#"])').click((function(){if(location.pathname.replace(/^\//,"")==this.pathname.replace(/^\//,"")&&location.hostname==this.hostname){var t=e(this.hash);if((t=t.length?t:e("[name="+this.hash.slice(1)+"]")).length)return e("html, body").animate({scrollTop:t.offset().top},1e3,"easeInOutExpo"),!1}})),e(".js-scroll-trigger").click((function(){e(".navbar-collapse").collapse("hide")})),e("body").scrollspy({target:"#sideNav"})}(jQuery);
options.rs
use std::path::PathBuf; use clap::{Clap, AppSettings, ValueHint}; use crate::server::methods::InstallMethod; use crate::server::version::Version; #[derive(Clap, Debug, Clone)] #[clap(setting=AppSettings::DisableVersionFlag)] pub struct ProjectCommand { #[clap(subcommand)] pub subcommand: Command, } #[derive(Clap, Clone, Debug)] pub enum Command {
/// Initialize a new or existing project Init(Init), /// Remove association with and optionally destroy the /// linked EdgeDB instance. Unlink(Unlink), } #[derive(Clap, Debug, Clone)] #[clap(setting=AppSettings::DisableVersionFlag)] pub struct Init { /// Specifies a project root directory explicitly. #[clap(value_hint=ValueHint::DirPath)] pub project_dir: Option<PathBuf>, /// Specifies the desired EdgeDB server version #[clap(long)] pub server_version: Option<Version<String>>, /// Specifies the EdgeDB server instance to be associated with the project #[clap(long)] pub server_instance: Option<String>, /// Specifies the method used to install the EdgeDB server #[clap(long, possible_values=&["package", "docker"][..])] pub server_install_method: Option<InstallMethod>, /// Run in non-interactive mode (accepting all defaults) #[clap(long)] pub non_interactive: bool, } #[derive(Clap, Debug, Clone)] #[clap(setting=AppSettings::DisableVersionFlag)] pub struct Unlink { /// Specifies a project root directory explicitly. #[clap(value_hint=ValueHint::DirPath)] pub project_dir: Option<PathBuf>, /// If specified, the associated EdgeDB instance is destroyed by running edgedb server destroy. #[clap(long, short='D')] pub destroy_server_instance: bool, #[clap(long)] pub non_interactive: bool, }
primitives_test.py
# pylint: disable=missing-docstring import unittest import numpy as np import tensorflow as tf from absl.testing import parameterized from tf_encrypted.primitives import paillier from tf_encrypted.test import tf_execution_context class EncryptionTest(parameterized.TestCase):
if __name__ == "__main__": unittest.main()
@parameterized.parameters( { "run_eagerly": run_eagerly, "export_dtype": export_dtype, "export_expansion": export_expansion, } for run_eagerly in [True, False] for export_dtype, export_expansion in [(tf.string, ())] ) def test_export(self, run_eagerly, export_dtype, export_expansion): x = np.array([[12345, 34342]]) context = tf_execution_context(run_eagerly) with context.scope(): ek, dk = paillier.gen_keypair() assert isinstance(ek, paillier.EncryptionKey) assert isinstance(dk, paillier.DecryptionKey) n_exported = ek.export(export_dtype) assert isinstance(n_exported, tf.Tensor) assert n_exported.dtype == export_dtype assert n_exported.shape == (1, 1), n_exported.shape p_exported, q_exported = dk.export(export_dtype) assert isinstance(p_exported, tf.Tensor) assert p_exported.dtype == export_dtype assert p_exported.shape == (1, 1), p_exported.shape assert isinstance(q_exported, tf.Tensor) assert q_exported.dtype == export_dtype assert q_exported.shape == (1, 1), q_exported.shape r = paillier.gen_randomness(ek, shape=x.shape) assert isinstance(r, paillier.Randomness) r_exported = r.export(export_dtype) assert isinstance(r_exported, tf.Tensor) assert r_exported.dtype == export_dtype assert r_exported.shape == x.shape + export_expansion c = paillier.encrypt(ek, x, r) assert isinstance(c, paillier.Ciphertext) c_exported = c.export(export_dtype) assert isinstance(c_exported, tf.Tensor) assert c_exported.dtype == export_dtype assert c_exported.shape == x.shape + export_expansion @parameterized.parameters( {"run_eagerly": run_eagerly} for run_eagerly in (True, False) ) def test_correctness(self, run_eagerly): p = 100000015333 q = 100000015021 n = p * q nn = n * n g = 1 + n x = 123456789 r = 5083216764521909821749 c = pow(g, x, nn) * pow(r, n, nn) % nn context = tf_execution_context(run_eagerly) with context.scope(): ek = paillier.EncryptionKey(tf.constant([[str(n)]])) plaintext = np.array([[x]]).astype(str) randomness = paillier.Randomness(tf.constant([[str(r)]])) ciphertext = paillier.encrypt(ek, plaintext, randomness) expected = np.array([[c]]).astype(str) actual = ciphertext.export(tf.string) np.testing.assert_equal(context.evaluate(actual).astype(str), expected) @parameterized.parameters( {"run_eagerly": run_eagerly, "x": x, "dtype": dtype} for run_eagerly in [True, False] for x, dtype in [ (np.array([[12345, 34342]]).astype(np.int32), tf.int32), (np.array([["12345", "34342"]]).astype(str), tf.string), ( np.array( [ [ "123456789123456789123456789123456789", "987654321987654321987654321987654321", ] ] ).astype(str), tf.string, ), ] ) def test_encrypt_decrypt(self, run_eagerly, x, dtype): context = tf_execution_context(run_eagerly) with context.scope(): ek, dk = paillier.gen_keypair() r = paillier.gen_randomness(ek, shape=x.shape) c = paillier.encrypt(ek, x, r) y = paillier.decrypt(dk, c, dtype=dtype) assert isinstance(y, tf.Tensor) assert y.dtype == dtype np.testing.assert_equal(context.evaluate(y).astype(x.dtype), x) @parameterized.parameters( {"run_eagerly": run_eagerly, "dtype": dtype, "x0": x0, "x1": x1} for run_eagerly in (True, False) for dtype in (tf.int32, tf.string) for x0 in (np.array([[12345, 123243]]), np.array([[12345]])) for x1 in (np.array([[12656, 434234]]),) ) def test_add(self, run_eagerly, dtype, x0, x1): expected = x0 + x1 context = tf_execution_context(run_eagerly) with context.scope(): ek, dk = paillier.gen_keypair() r0 = paillier.gen_randomness(ek, shape=x0.shape) c0 = paillier.encrypt(ek, x0, r0) r1 = paillier.gen_randomness(ek, shape=x1.shape) c1 = paillier.encrypt(ek, 
x1, r1) c = paillier.add(ek, c0, c1) y = paillier.decrypt(dk, c, dtype=dtype) np.testing.assert_equal( context.evaluate(y).astype(np.int32), expected.astype(np.int32) )
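# A pure-Python sketch of the homomorphism these tests exercise, using the
# toy primes from test_correctness above. This is not the tf-encrypted API;
# it is the textbook Paillier scheme with g = n + 1, and it relies on
# pow(x, -1, n) from Python 3.8+.
from math import gcd

p, q = 100000015333, 100000015021
n, nn = p * q, (p * q) ** 2
g = n + 1
lam = (p - 1) * (q - 1) // gcd(p - 1, q - 1)   # lcm(p - 1, q - 1)

def L(x):
    return (x - 1) // n

mu = pow(L(pow(g, lam, nn)), -1, n)            # modular inverse mod n

def encrypt(m, r):
    return pow(g, m, nn) * pow(r, n, nn) % nn

def decrypt(c):
    return L(pow(c, lam, nn)) * mu % n

c0 = encrypt(12345, 5083216764521909821749)    # r taken from the test above
c1 = encrypt(34342, 9876543210987654321)       # second r is arbitrary
assert decrypt(c0 * c1 % nn) == 12345 + 34342  # additive homomorphism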
translations.ts
/* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ import { i18n } from '@kbn/i18n'; export * from '../case_view/translations'; export const ALREADY_PUSHED_TO_SERVICE = (externalService: string) => i18n.translate('xpack.securitySolution.case.caseView.alreadyPushedToExternalService', { values: { externalService }, defaultMessage: 'Already pushed to { externalService } incident', }); export const REQUIRED_UPDATE_TO_SERVICE = (externalService: string) => i18n.translate('xpack.securitySolution.case.caseView.requiredUpdateToExternalService', { values: { externalService }, defaultMessage: 'Requires update to { externalService } incident', }); export const COPY_REFERENCE_LINK = i18n.translate( 'xpack.securitySolution.case.caseView.copyCommentLinkAria', { defaultMessage: 'Copy reference link', } ); export const MOVE_TO_ORIGINAL_COMMENT = i18n.translate( 'xpack.securitySolution.case.caseView.moveToCommentAria', { defaultMessage: 'Highlight the referenced comment', } ); export const ALERT_COMMENT_LABEL_TITLE = i18n.translate( 'xpack.securitySolution.case.caseView.alertCommentLabelTitle', { defaultMessage: 'added an alert from', } ); export const ALERT_RULE_DELETED_COMMENT_LABEL = i18n.translate( 'xpack.securitySolution.case.caseView.alertRuleDeletedLabelTitle', { defaultMessage: 'added an alert', } ); export const SHOW_ALERT_TOOLTIP = i18n.translate( 'xpack.securitySolution.case.caseView.showAlertTooltip', { defaultMessage: 'Show alert details', } ); export const ALERT_NOT_FOUND_TOOLTIP = i18n.translate( 'xpack.securitySolution.case.caseView.showAlertDeletedTooltip', {
defaultMessage: 'Alert not found', } );
inbound_handlers.rs
// Copyright 2019. The Tari Project // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that the // following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following // disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the // following disclaimer in the documentation and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote // products derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, // INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. use crate::{ base_node::{ comms_interface::{ error::CommsInterfaceError, local_interface::BlockEventSender, NodeCommsRequest, NodeCommsResponse, }, OutboundNodeCommsInterface, }, blocks::{block_header::BlockHeader, Block, NewBlock, NewBlockTemplate}, chain_storage::{async_db::AsyncBlockchainDb, BlockAddResult, BlockchainBackend, ChainBlock, PrunedOutput}, consensus::{ConsensusConstants, ConsensusManager}, mempool::{async_mempool, Mempool}, proof_of_work::{Difficulty, PowAlgorithm}, transactions::transaction::TransactionKernel, }; use log::*; use std::{ fmt::{Display, Error, Formatter}, sync::Arc, }; use strum_macros::Display; use tari_common_types::types::{BlockHash, HashOutput}; use tari_comms::peer_manager::NodeId; use tari_crypto::tari_utilities::{hash::Hashable, hex::Hex}; use tokio::sync::Semaphore; const LOG_TARGET: &str = "c::bn::comms_interface::inbound_handler"; const MAX_HEADERS_PER_RESPONSE: u32 = 100; /// Events that can be published on the Validated Block Event Stream /// Broadcast is to notify subscribers if this is a valid propagated block event #[derive(Debug, Clone, Display)] pub enum BlockEvent { ValidBlockAdded(Arc<Block>, BlockAddResult, Broadcast), AddBlockFailed(Arc<Block>, Broadcast), BlockSyncComplete(Arc<ChainBlock>), BlockSyncRewind(Vec<Arc<ChainBlock>>), } /// Used to notify if the block event is for a propagated block. #[derive(Debug, Clone, Copy)] pub struct Broadcast(bool); impl Broadcast { #[inline] pub fn is_true(&self) -> bool { self.0 } } #[allow(clippy::identity_op)] impl Display for Broadcast { fn fmt(&self, f: &mut Formatter) -> Result<(), Error> { write!(f, "Broadcast[{}]", self.0) }
fn from(v: Broadcast) -> Self { v.0 } } impl From<bool> for Broadcast { fn from(v: bool) -> Self { Broadcast(v) } } /// The InboundNodeCommsInterface is used to handle all received inbound requests from remote nodes. pub struct InboundNodeCommsHandlers<T> { block_event_sender: BlockEventSender, blockchain_db: AsyncBlockchainDb<T>, mempool: Mempool, consensus_manager: ConsensusManager, new_block_request_semaphore: Arc<Semaphore>, outbound_nci: OutboundNodeCommsInterface, } impl<T> InboundNodeCommsHandlers<T> where T: BlockchainBackend + 'static { /// Construct a new InboundNodeCommsInterface. pub fn new( block_event_sender: BlockEventSender, blockchain_db: AsyncBlockchainDb<T>, mempool: Mempool, consensus_manager: ConsensusManager, outbound_nci: OutboundNodeCommsInterface, ) -> Self { Self { block_event_sender, blockchain_db, mempool, consensus_manager, new_block_request_semaphore: Arc::new(Semaphore::new(1)), outbound_nci, } } /// Handle inbound node comms requests from remote nodes and local services. pub async fn handle_request(&self, request: NodeCommsRequest) -> Result<NodeCommsResponse, CommsInterfaceError> { debug!(target: LOG_TARGET, "Handling remote request {}", request); match request { NodeCommsRequest::GetChainMetadata => Ok(NodeCommsResponse::ChainMetadata( self.blockchain_db.get_chain_metadata().await?, )), NodeCommsRequest::FetchHeaders(block_nums) => { let mut block_headers = Vec::<BlockHeader>::with_capacity(block_nums.len()); for block_num in block_nums { match self.blockchain_db.fetch_header(block_num).await { Ok(Some(block_header)) => { block_headers.push(block_header); }, Ok(None) => return Err(CommsInterfaceError::BlockHeaderNotFound(block_num)), Err(err) => { error!(target: LOG_TARGET, "Could not fetch headers: {}", err.to_string()); return Err(err.into()); }, } } Ok(NodeCommsResponse::BlockHeaders(block_headers)) }, NodeCommsRequest::FetchHeadersWithHashes(block_hashes) => { let mut block_headers = Vec::<BlockHeader>::with_capacity(block_hashes.len()); for block_hash in block_hashes { let block_hex = block_hash.to_hex(); match self.blockchain_db.fetch_header_by_block_hash(block_hash).await? { Some(block_header) => { block_headers.push(block_header); }, None => { error!(target: LOG_TARGET, "Could not fetch headers with hashes:{}", block_hex); return Err(CommsInterfaceError::InternalError(format!( "Could not fetch headers with hashes:{}", block_hex ))); }, } } Ok(NodeCommsResponse::BlockHeaders(block_headers)) }, NodeCommsRequest::FetchHeadersAfter(header_hashes, stopping_hash) => { let mut starting_block = None; // Find first header that matches for header_hash in header_hashes { match self .blockchain_db .fetch_header_by_block_hash(header_hash.clone()) .await? { Some(from_block) => { starting_block = Some(from_block); break; }, None => { // Not an error. The header requested is simply not in our chain. // Logging it as debug because it may not just be not found. debug!( target: LOG_TARGET, "Skipping header {} when searching for matching headers in our chain.", header_hash.to_hex(), ); }, } } let starting_block = match starting_block { Some(b) => b, // Send from genesis block if no hashes match None => self .blockchain_db .fetch_header(0) .await? 
.ok_or(CommsInterfaceError::BlockHeaderNotFound(0))?, }; let mut headers = Vec::with_capacity(MAX_HEADERS_PER_RESPONSE as usize); for i in 1..MAX_HEADERS_PER_RESPONSE { match self.blockchain_db.fetch_header(starting_block.height + i as u64).await { Ok(Some(header)) => { let hash = header.hash(); headers.push(header); if hash == stopping_hash { break; } }, Err(err) => { error!( target: LOG_TARGET, "Could not fetch header at {}:{}", starting_block.height + i as u64, err.to_string() ); return Err(err.into()); }, _ => error!(target: LOG_TARGET, "Could not fetch header: None"), } } Ok(NodeCommsResponse::FetchHeadersAfterResponse(headers)) }, NodeCommsRequest::FetchMatchingUtxos(utxo_hashes) => { let mut res = Vec::with_capacity(utxo_hashes.len()); for (pruned_output, spent) in (self.blockchain_db.fetch_utxos(utxo_hashes).await?) .into_iter() .flatten() { if let PrunedOutput::NotPruned { output } = pruned_output { if !spent { res.push(output); } } } Ok(NodeCommsResponse::TransactionOutputs(res)) }, NodeCommsRequest::FetchMatchingTxos(hashes) => { let res = self .blockchain_db .fetch_utxos(hashes) .await? .into_iter() .filter_map(|opt| match opt { Some((PrunedOutput::NotPruned { output }, _)) => Some(output), _ => None, }) .collect(); Ok(NodeCommsResponse::TransactionOutputs(res)) }, NodeCommsRequest::FetchMatchingBlocks(block_nums) => { let mut blocks = Vec::with_capacity(block_nums.len()); for block_num in block_nums { debug!(target: LOG_TARGET, "A peer has requested block {}", block_num); match self.blockchain_db.fetch_block(block_num).await { Ok(block) => blocks.push(block), // We need to suppress the error as another node might ask for a block we dont have, so we // return ok([]) Err(e) => debug!( target: LOG_TARGET, "Could not provide requested block {} to peer because: {}", block_num, e ), } } Ok(NodeCommsResponse::HistoricalBlocks(blocks)) }, NodeCommsRequest::FetchBlocksWithHashes(block_hashes) => { let mut blocks = Vec::with_capacity(block_hashes.len()); for block_hash in block_hashes { let block_hex = block_hash.to_hex(); debug!( target: LOG_TARGET, "A peer has requested a block with hash {}", block_hex ); match self.blockchain_db.fetch_block_by_hash(block_hash).await { Ok(Some(block)) => blocks.push(block), Ok(None) => warn!( target: LOG_TARGET, "Could not provide requested block {} to peer because not stored", block_hex, ), Err(e) => warn!( target: LOG_TARGET, "Could not provide requested block {} to peer because: {}", block_hex, e.to_string() ), } } Ok(NodeCommsResponse::HistoricalBlocks(blocks)) }, NodeCommsRequest::FetchBlocksWithKernels(excess_sigs) => { let mut blocks = Vec::with_capacity(excess_sigs.len()); for sig in excess_sigs { let sig_hex = sig.get_signature().to_hex(); debug!( target: LOG_TARGET, "A peer has requested a block with kernel with sig {}", sig_hex ); match self.blockchain_db.fetch_block_with_kernel(sig).await { Ok(Some(block)) => blocks.push(block), Ok(None) => warn!( target: LOG_TARGET, "Could not provide requested block containing kernel with sig {} to peer because not \ stored", sig_hex ), Err(e) => warn!( target: LOG_TARGET, "Could not provide requested block containing kernel with sig {} to peer because: {}", sig_hex, e.to_string() ), } } Ok(NodeCommsResponse::HistoricalBlocks(blocks)) }, NodeCommsRequest::FetchBlocksWithUtxos(hashes) => { let mut blocks = Vec::with_capacity(hashes.len()); for hash in hashes { let hash_hex = hash.to_hex(); debug!( target: LOG_TARGET, "A peer has requested a block with hash {}", hash_hex, ); match 
self.blockchain_db.fetch_block_with_utxo(hash).await { Ok(Some(block)) => blocks.push(block), Ok(None) => warn!( target: LOG_TARGET, "Could not provide requested block {} to peer because not stored", hash_hex, ), Err(e) => warn!( target: LOG_TARGET, "Could not provide requested block {} to peer because: {}", hash_hex, e.to_string() ), } } Ok(NodeCommsResponse::HistoricalBlocks(blocks)) }, NodeCommsRequest::GetHeaderByHash(hash) => { let header = self.blockchain_db.fetch_header_by_block_hash(hash).await?; Ok(NodeCommsResponse::BlockHeader(header)) }, NodeCommsRequest::GetBlockByHash(hash) => { let block = self.blockchain_db.fetch_block_by_hash(hash).await?; Ok(NodeCommsResponse::HistoricalBlock(Box::new(block))) }, NodeCommsRequest::GetNewBlockTemplate(request) => { let best_block_header = self.blockchain_db.fetch_tip_header().await?; let mut header = BlockHeader::from_previous(best_block_header.header()); let constants = self.consensus_manager.consensus_constants(header.height); header.version = constants.blockchain_version(); header.pow.pow_algo = request.algo; let constants_weight = constants.get_max_block_weight_excluding_coinbase(); let asking_weight = if request.max_weight > constants_weight || request.max_weight == 0 { constants_weight } else { request.max_weight }; let transactions = async_mempool::retrieve(self.mempool.clone(), asking_weight) .await? .into_iter() .map(|tx| Arc::try_unwrap(tx).unwrap_or_else(|tx| (*tx).clone())) .collect::<Vec<_>>(); debug!( target: LOG_TARGET, "Adding {} transaction(s) to new block template", transactions.len() ); let prev_hash = header.prev_hash.clone(); let height = header.height; let block_template = NewBlockTemplate::from_block( header.into_builder().with_transactions(transactions).build(), self.get_target_difficulty_for_next_block(request.algo, constants, prev_hash) .await?, self.consensus_manager.get_block_reward_at(height), ); debug!( target: LOG_TARGET, "New block template requested at height {}", block_template.header.height, ); trace!(target: LOG_TARGET, "{}", block_template); Ok(NodeCommsResponse::NewBlockTemplate(block_template)) }, NodeCommsRequest::GetNewBlock(block_template) => { let block = self.blockchain_db.prepare_new_block(block_template).await?; Ok(NodeCommsResponse::NewBlock { success: true, error: None, block: Some(block), }) }, NodeCommsRequest::FetchKernelByExcessSig(signature) => { let mut kernels = Vec::<TransactionKernel>::new(); match self.blockchain_db.fetch_kernel_by_excess_sig(signature).await { Ok(kernel) => match kernel { None => (), Some((kernel, _kernel_hash)) => { kernels.push(kernel); }, }, Err(err) => { error!(target: LOG_TARGET, "Could not fetch kernel {}", err.to_string()); return Err(err.into()); }, } Ok(NodeCommsResponse::TransactionKernels(kernels)) }, } } /// Handles a `NewBlock` message. Only a single `NewBlock` message can be handled at once to prevent extraneous /// requests for the full block. /// This may (asynchronously) block until the other request(s) complete or time out and so should typically be /// executed in a dedicated task. pub async fn handle_new_block_message( &mut self, new_block: NewBlock, source_peer: NodeId, ) -> Result<(), CommsInterfaceError> { let NewBlock { block_hash } = new_block; // Only a single block request can complete at a time. // As multiple NewBlock requests arrive from propagation, this semaphore prevents multiple requests to nodes for // the same full block. 
The first request that succeeds will stop the node from requesting the block from any // other node (block_exists is true). let _permit = self.new_block_request_semaphore.acquire().await; if self.blockchain_db.block_exists(block_hash.clone()).await? { debug!( target: LOG_TARGET, "Block with hash `{}` already stored", block_hash.to_hex() ); return Ok(()); } debug!( target: LOG_TARGET, "Block with hash `{}` is unknown. Requesting it from peer `{}`.", block_hash.to_hex(), source_peer.short_str() ); let mut block = self .outbound_nci .request_blocks_with_hashes_from_peer(vec![block_hash], Some(source_peer.clone())) .await?; match block.pop() { Some(block) => { self.handle_block(Arc::new(block.try_into_block()?), true.into(), Some(source_peer)) .await?; Ok(()) }, None => { // TODO: #banheuristic - peer propagated block hash for which it could not return the full block debug!( target: LOG_TARGET, "Peer `{}` failed to return the block that was requested.", source_peer.short_str() ); Err(CommsInterfaceError::InvalidPeerResponse(format!( "Invalid response from peer `{}`: Peer failed to provide the block that was propagated", source_peer.short_str() ))) }, } } /// Handle inbound blocks from remote nodes and local services. pub async fn handle_block( &self, block: Arc<Block>, broadcast: Broadcast, source_peer: Option<NodeId>, ) -> Result<BlockHash, CommsInterfaceError> { let block_hash = block.hash(); let block_height = block.header.height; info!( target: LOG_TARGET, "Block #{} ({}) received from {}", block_height, block_hash.to_hex(), source_peer .as_ref() .map(|p| format!("remote peer: {}", p)) .unwrap_or_else(|| "local services".to_string()) ); trace!(target: LOG_TARGET, "Block: {}", block); let add_block_result = self.blockchain_db.add_block(block.clone()).await; // Create block event on block event stream match add_block_result { Ok(block_add_result) => { trace!(target: LOG_TARGET, "Block event created: {}", block_add_result); let should_propagate = match &block_add_result { BlockAddResult::Ok(_) => true, BlockAddResult::BlockExists => false, BlockAddResult::OrphanBlock => false, BlockAddResult::ChainReorg { .. } => true, }; self.blockchain_db.cleanup_orphans().await?; self.publish_block_event(BlockEvent::ValidBlockAdded(block, block_add_result, broadcast)); if should_propagate && broadcast.is_true() { info!( target: LOG_TARGET, "Propagate block ({}) to network.", block_hash.to_hex() ); let exclude_peers = source_peer.into_iter().collect(); let new_block = NewBlock::new(block_hash.clone()); self.outbound_nci.propagate_block(new_block, exclude_peers).await?; } Ok(block_hash) }, Err(e) => { warn!( target: LOG_TARGET, "Block #{} ({}) validation failed: {:?}", block_height, block_hash.to_hex(), e ); self.publish_block_event(BlockEvent::AddBlockFailed(block, broadcast)); Err(CommsInterfaceError::ChainStorageError(e)) }, } } fn publish_block_event(&self, event: BlockEvent) { if let Err(event) = self.block_event_sender.send(Arc::new(event)) { debug!(target: LOG_TARGET, "No event subscribers. 
Event {} dropped.", event.0) } } async fn get_target_difficulty_for_next_block( &self, pow_algo: PowAlgorithm, constants: &ConsensusConstants, current_block_hash: HashOutput, ) -> Result<Difficulty, CommsInterfaceError> { let target_difficulty = self .blockchain_db .fetch_target_difficulty_for_next_block(pow_algo, current_block_hash) .await?; let target = target_difficulty.calculate( constants.min_pow_difficulty(pow_algo), constants.max_pow_difficulty(pow_algo), ); debug!(target: LOG_TARGET, "Target difficulty {} for PoW {}", target, pow_algo); Ok(target) } } impl<T> Clone for InboundNodeCommsHandlers<T> { fn clone(&self) -> Self { Self { block_event_sender: self.block_event_sender.clone(), blockchain_db: self.blockchain_db.clone(), mempool: self.mempool.clone(), consensus_manager: self.consensus_manager.clone(), new_block_request_semaphore: self.new_block_request_semaphore.clone(), outbound_nci: self.outbound_nci.clone(), } } }
} impl From<Broadcast> for bool {
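# A hedged, self-contained model (in Python asyncio, not Tari's Rust) of the
# single-permit pattern handle_new_block_message uses above: a Semaphore(1)
# serializes handling so that, of many concurrent announcements of the same
# block hash, only the first triggers a fetch and the rest hit the
# "block already stored" fast path. All names here are illustrative.
import asyncio

class BlockFetcher:
    def __init__(self):
        self._sem = asyncio.Semaphore(1)
        self._stored = set()
        self.fetches = 0

    async def handle_announcement(self, block_hash: str) -> None:
        async with self._sem:               # one request in flight at a time
            if block_hash in self._stored:  # "block_exists" fast path
                return
            await asyncio.sleep(0.01)       # stand-in for the network fetch
            self.fetches += 1
            self._stored.add(block_hash)

async def main():
    fetcher = BlockFetcher()
    # five peers propagate the same block concurrently
    await asyncio.gather(*(fetcher.handle_announcement("abc123") for _ in range(5)))
    print(fetcher.fetches)  # 1

asyncio.run(main())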
managementLocksGetAtResourceGroupLevelSample.js
/* * Copyright (c) Microsoft Corporation. * Licensed under the MIT License. * * Code generated by Microsoft (R) AutoRest Code Generator. * Changes may cause incorrect behavior and will be lost if the code is regenerated. */ // Copyright (c) Microsoft Corporation. // Licensed under the MIT License. const { ManagementLockClient } = require("@azure/arm-locks"); const { DefaultAzureCredential } = require("@azure/identity"); /** * This sample demonstrates how to get a management lock at the resource group level. * * @summary Gets a management lock at the resource group level. * x-ms-original-file: specification/resources/resource-manager/Microsoft.Authorization/stable/2020-05-01/examples/ManagementLocks_GetAtResourceGroupLevel.json */ async function
() { const subscriptionId = "subscriptionId"; const resourceGroupName = "resourcegroupname"; const lockName = "testlock"; const credential = new DefaultAzureCredential(); const client = new ManagementLockClient(credential, subscriptionId); const result = await client.managementLocks.getAtResourceGroupLevel(resourceGroupName, lockName); console.log(result); } getManagementLockAtResourceGroupLevel().catch(console.error);
getManagementLockAtResourceGroupLevel
es.config.js
/* global module */ var pkg = require('../package.json');
'/src/index$': '<rootDir>/es/index', }, });
module.exports = Object.assign({}, pkg.jest, { rootDir: '../', moduleNameMapper: {
basichashringexample_test.go
package ring_test import ( "fmt" "hash/fnv" "math/rand" "time" "github.com/gholt/ring/lowring" ) // See https://github.com/gholt/ring/blob/master/BASIC_HASH_RING.md func Example_fromBasicHashRingDocument() { hash := func(x int) uint64 { hasher := fnv.New64a() hasher.Write([]byte(fmt.Sprintf("%d", x))) return hasher.Sum64() } randIntn := rand.New(rand.NewSource(0)).Intn const ITEMS = 1000000 const NODES = 100 r := lowring.New(1) for n := 0; n < NODES; n++ { r.AddNode(1, 0) } r.Rebalance(randIntn) // Copy the essential ring data ring1 := make([][]lowring.Node, len(r.ReplicaToPartitionToNode)) for replica, partitionToNode := range r.ReplicaToPartitionToNode { ring1[replica] = make([]lowring.Node, len(partitionToNode)) copy(ring1[replica], partitionToNode) } partitionCount1 := uint64(len(ring1[0])) countPerNode := make([]int, NODES) for i := 0; i < ITEMS; i++ { n := ring1[0][hash(i)%partitionCount1] countPerNode[n]++ } min := ITEMS max := 0 for n := 0; n < NODES; n++ { if countPerNode[n] < min { min = countPerNode[n] } if countPerNode[n] > max { max = countPerNode[n] }
float64(t-min)/float64(t)*100, float64(max-t)/float64(t)*100) r.AddNode(1, 0) // Reset wait time restrictions r.Rebalanced = r.Rebalanced.Add(-(time.Duration(r.ReassignmentWait) * time.Minute)) r.Rebalance(randIntn) // Copy the essential ring data ring2 := make([][]lowring.Node, len(r.ReplicaToPartitionToNode)) for replica, partitionToNode := range r.ReplicaToPartitionToNode { ring2[replica] = make([]lowring.Node, len(partitionToNode)) copy(ring2[replica], partitionToNode) } partitionCount2 := uint64(len(ring2[0])) moved := 0 for i := 0; i < ITEMS; i++ { h := hash(i) n1 := ring1[0][h%partitionCount1] n2 := ring2[0][h%partitionCount2] if n1 != n2 { moved++ } } fmt.Printf("%d items moved, %.02f%%.\n", moved, float64(moved)/float64(ITEMS)*100) // Output: // 9554 to 10289 assignments per node, target was 10000. // That's 4.46% under and 2.89% over. // 9815 items moved, 0.98%. }
} t := ITEMS / NODES fmt.Printf("%d to %d assignments per node, target was %d.\n", min, max, t) fmt.Printf("That's %.02f%% under and %.02f%% over.\n",
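# For contrast, a hedged sketch (illustrative, not part of the gholt/ring
# API) of why plain `hash % N` placement needs the partition ring: adding a
# single node to a mod-N scheme relocates nearly every item, versus the ~1%
# movement the example above reports.
import hashlib

def h(i: int) -> int:
    return int.from_bytes(hashlib.sha256(str(i).encode()).digest()[:8], "big")

ITEMS, NODES = 100_000, 100
moved = sum(1 for i in range(ITEMS) if h(i) % NODES != h(i) % (NODES + 1))
print(f"{moved / ITEMS:.0%} of items moved")  # roughly 99% for mod-N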
msg.rs
// Copyright 2018 The Epic Developers // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Message types that transit over the network and related serialization code. use num::FromPrimitive; use std::io::{Read, Write}; use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; use std::time; use crate::core::core::hash::Hash; use crate::core::core::BlockHeader; use crate::core::pow::Difficulty; use crate::core::ser::{self, FixedLength, Readable, Reader, StreamingReader, Writeable, Writer}; use crate::core::{consensus, global}; use crate::types::{ Capabilities, Error, ReasonForBan, MAX_BLOCK_HEADERS, MAX_LOCATORS, MAX_PEER_ADDRS, }; use crate::util::read_write::read_exact; /// Current latest version of the protocol pub const PROTOCOL_VERSION: u32 = 1; /// Epic's user agent with current version pub const USER_AGENT: &'static str = concat!("MW/Epic ", env!("CARGO_PKG_VERSION")); /// Magic numbers expected in the header of every message const OTHER_MAGIC: [u8; 2] = [73, 43]; const FLOONET_MAGIC: [u8; 2] = [83, 59]; const MAINNET_MAGIC: [u8; 2] = [97, 61]; /// Max theoretical size of a block filled with outputs. const MAX_BLOCK_SIZE: u64 = (consensus::MAX_BLOCK_WEIGHT / consensus::BLOCK_OUTPUT_WEIGHT * 708) as u64; /// Types of messages. /// Note: Values here are *important* so we should only add new values at the /// end. enum_from_primitive! { #[derive(Debug, Clone, Copy, PartialEq)] pub enum Type { Error = 0, Hand = 1, Shake = 2, Ping = 3, Pong = 4, GetPeerAddrs = 5, PeerAddrs = 6, GetHeaders = 7, Header = 8, Headers = 9, GetBlock = 10, Block = 11, GetCompactBlock = 12, CompactBlock = 13, StemTransaction = 14, Transaction = 15, TxHashSetRequest = 16, TxHashSetArchive = 17, BanReason = 18, GetTransaction = 19, TransactionKernel = 20, } } // Max msg size for each msg type. fn max_msg_size(msg_type: Type) -> u64 { match msg_type { Type::Error => 0, Type::Hand => 128, Type::Shake => 88, Type::Ping => 16, Type::Pong => 16, Type::GetPeerAddrs => 4, Type::PeerAddrs => 4 + (1 + 16 + 2) * MAX_PEER_ADDRS as u64, Type::GetHeaders => 1 + 32 * MAX_LOCATORS as u64, Type::Header => 365, Type::Headers => 2 + 365 * MAX_BLOCK_HEADERS as u64, Type::GetBlock => 32, Type::Block => MAX_BLOCK_SIZE, Type::GetCompactBlock => 32, Type::CompactBlock => MAX_BLOCK_SIZE / 10, Type::StemTransaction => MAX_BLOCK_SIZE, Type::Transaction => MAX_BLOCK_SIZE, Type::TxHashSetRequest => 40, Type::TxHashSetArchive => 64, Type::BanReason => 64, Type::GetTransaction => 32, Type::TransactionKernel => 32, } } fn magic() -> [u8; 2] { match *global::CHAIN_TYPE.read() { global::ChainTypes::Floonet => FLOONET_MAGIC, global::ChainTypes::Mainnet => MAINNET_MAGIC, _ => OTHER_MAGIC, } } /// Read a header from the provided stream without blocking if the /// underlying stream is async. Typically headers will be polled for, so /// we do not want to block. 
pub fn read_header(stream: &mut dyn Read, msg_type: Option<Type>) -> Result<MsgHeader, Error> { let mut head = vec![0u8; MsgHeader::LEN]; if Some(Type::Hand) == msg_type { read_exact(stream, &mut head, time::Duration::from_millis(10), true)?; } else { read_exact(stream, &mut head, time::Duration::from_secs(10), false)?; } let header = ser::deserialize::<MsgHeader>(&mut &head[..])?; let max_len = max_msg_size(header.msg_type); // TODO 4x the limits for now to leave ourselves space to change things if header.msg_len > max_len * 4 { error!( "Too large read {}, had {}, wanted {}.", header.msg_type as u8, max_len, header.msg_len ); return Err(Error::Serialization(ser::Error::TooLargeReadErr)); } Ok(header) } /// Read a single item from the provided stream, always blocking until we /// have a result (or timeout). /// Returns the item and the total bytes read. pub fn read_item<T: Readable>(stream: &mut dyn Read) -> Result<(T, u64), Error> { let timeout = time::Duration::from_secs(20); let mut reader = StreamingReader::new(stream, timeout); let res = T::read(&mut reader)?; Ok((res, reader.total_bytes_read())) } /// Read a message body from the provided stream, always blocking /// until we have a result (or timeout). pub fn read_body<T: Readable>(h: &MsgHeader, stream: &mut dyn Read) -> Result<T, Error> { let mut body = vec![0u8; h.msg_len as usize]; read_exact(stream, &mut body, time::Duration::from_secs(20), true)?; ser::deserialize(&mut &body[..]).map_err(From::from) } /// Reads a full message from the underlying stream. pub fn read_message<T: Readable>(stream: &mut dyn Read, msg_type: Type) -> Result<T, Error> { let header = read_header(stream, Some(msg_type))?; if header.msg_type != msg_type { return Err(Error::BadMessage); } read_body(&header, stream) } pub fn write_to_buf<T: Writeable>(msg: T, msg_type: Type) -> Vec<u8> { // prepare the body first so we know its serialized length let mut body_buf = vec![]; ser::serialize(&mut body_buf, &msg).unwrap(); // build and serialize the header using the body size let mut msg_buf = vec![]; let blen = body_buf.len() as u64; ser::serialize(&mut msg_buf, &MsgHeader::new(msg_type, blen)).unwrap(); msg_buf.append(&mut body_buf); msg_buf } pub fn write_message<T: Writeable>( stream: &mut dyn Write, msg: T, msg_type: Type, ) -> Result<(), Error> { let buf = write_to_buf(msg, msg_type); stream.write_all(&buf[..])?; Ok(()) } /// Header of any protocol message, used to identify incoming messages. pub struct MsgHeader { magic: [u8; 2], /// Type of the message. pub msg_type: Type, /// Total length of the message in bytes. pub msg_len: u64, } impl MsgHeader { /// Creates a new message header. 
pub fn new(msg_type: Type, len: u64) -> MsgHeader { MsgHeader { magic: magic(), msg_type: msg_type, msg_len: len, } } } impl FixedLength for MsgHeader { const LEN: usize = 1 + 1 + 1 + 8; } impl Writeable for MsgHeader { fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> { ser_multiwrite!( writer, [write_u8, self.magic[0]], [write_u8, self.magic[1]], [write_u8, self.msg_type as u8], [write_u64, self.msg_len] ); Ok(()) } } impl Readable for MsgHeader { fn read(reader: &mut dyn Reader) -> Result<MsgHeader, ser::Error> { let m = magic(); reader.expect_u8(m[0])?; reader.expect_u8(m[1])?; let (t, len) = ser_multiread!(reader, read_u8, read_u64); match Type::from_u8(t) { Some(ty) => Ok(MsgHeader { magic: m, msg_type: ty, msg_len: len, }), None => Err(ser::Error::CorruptedData), } } } /// First part of a handshake, sender advertises its version and /// characteristics. pub struct Hand { /// protocol version of the sender pub version: u32, /// capabilities of the sender pub capabilities: Capabilities, /// randomly generated for each handshake, helps detect self pub nonce: u64, /// genesis block of our chain, only connect to peers on the same chain pub genesis: Hash, /// total difficulty accumulated by the sender, used to check whether sync /// may be needed pub total_difficulty: Difficulty, /// network address of the sender pub sender_addr: SockAddr, /// network address of the receiver pub receiver_addr: SockAddr, /// name of version of the software pub user_agent: String, } impl Writeable for Hand { fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> { ser_multiwrite!( writer, [write_u32, self.version], [write_u32, self.capabilities.bits()], [write_u64, self.nonce] ); self.total_difficulty.write(writer).unwrap(); self.sender_addr.write(writer).unwrap(); self.receiver_addr.write(writer).unwrap(); writer.write_bytes(&self.user_agent).unwrap(); self.genesis.write(writer).unwrap(); Ok(()) } } impl Readable for Hand { fn read(reader: &mut dyn Reader) -> Result<Hand, ser::Error> { let (version, capab, nonce) = ser_multiread!(reader, read_u32, read_u32, read_u64); let capabilities = Capabilities::from_bits_truncate(capab); let total_diff = Difficulty::read(reader)?; let sender_addr = SockAddr::read(reader)?; let receiver_addr = SockAddr::read(reader)?; let ua = reader.read_bytes_len_prefix()?; let user_agent = String::from_utf8(ua).map_err(|_| ser::Error::CorruptedData)?; let genesis = Hash::read(reader)?; Ok(Hand { version: version, capabilities: capabilities, nonce: nonce, genesis: genesis, total_difficulty: total_diff, sender_addr: sender_addr, receiver_addr: receiver_addr, user_agent: user_agent, }) } } /// Second part of a handshake, receiver of the first part replies with its own /// version and characteristics. 
pub struct Shake { /// sender version pub version: u32, /// sender capabilities pub capabilities: Capabilities, /// genesis block of our chain, only connect to peers on the same chain pub genesis: Hash, /// total difficulty accumulated by the sender, used to check whether sync /// may be needed pub total_difficulty: Difficulty, /// name of version of the software pub user_agent: String, } impl Writeable for Shake { fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> { ser_multiwrite!( writer, [write_u32, self.version], [write_u32, self.capabilities.bits()] ); self.total_difficulty.write(writer).unwrap(); writer.write_bytes(&self.user_agent).unwrap(); self.genesis.write(writer).unwrap(); Ok(()) } } impl Readable for Shake { fn read(reader: &mut dyn Reader) -> Result<Shake, ser::Error> { let (version, capab) = ser_multiread!(reader, read_u32, read_u32); let capabilities = Capabilities::from_bits_truncate(capab); let total_diff = Difficulty::read(reader)?; let ua = reader.read_bytes_len_prefix()?; let user_agent = String::from_utf8(ua).map_err(|_| ser::Error::CorruptedData)?; let genesis = Hash::read(reader)?; Ok(Shake { version: version, capabilities: capabilities, genesis: genesis, total_difficulty: total_diff, user_agent: user_agent, }) } } /// Ask for other peers addresses, required for network discovery. pub struct GetPeerAddrs { /// Filters on the capabilities we'd like the peers to have pub capabilities: Capabilities, } impl Writeable for GetPeerAddrs { fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> { writer.write_u32(self.capabilities.bits()) } } impl Readable for GetPeerAddrs { fn read(reader: &mut dyn Reader) -> Result<GetPeerAddrs, ser::Error> { let capab = reader.read_u32()?; let capabilities = Capabilities::from_bits_truncate(capab); Ok(GetPeerAddrs { capabilities }) } } /// Peer addresses we know of that are fresh enough, in response to /// GetPeerAddrs. #[derive(Debug)] pub struct PeerAddrs { pub peers: Vec<SockAddr>, } impl Writeable for PeerAddrs { fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> { writer.write_u32(self.peers.len() as u32)?; for p in &self.peers { p.write(writer).unwrap(); } Ok(()) } } impl Readable for PeerAddrs { fn read(reader: &mut dyn Reader) -> Result<PeerAddrs, ser::Error> { let peer_count = reader.read_u32()?; if peer_count > MAX_PEER_ADDRS { return Err(ser::Error::TooLargeReadErr); } else if peer_count == 0 { return Ok(PeerAddrs { peers: vec![] }); } // let peers = try_map_vec!([0..peer_count], |_| SockAddr::read(reader)); let mut peers = Vec::with_capacity(peer_count as usize); for _ in 0..peer_count { peers.push(SockAddr::read(reader)?); } Ok(PeerAddrs { peers: peers }) } } /// We found some issue in the communication, sending an error back, usually /// followed by closing the connection. pub struct PeerError { /// error code pub code: u32, /// slightly more user friendly message pub message: String, } impl Writeable for PeerError { fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> { ser_multiwrite!(writer, [write_u32, self.code], [write_bytes, &self.message]); Ok(()) } } impl Readable for PeerError { fn read(reader: &mut dyn Reader) -> Result<PeerError, ser::Error> { let code = reader.read_u32()?; let msg = reader.read_bytes_len_prefix()?; let message = String::from_utf8(msg).map_err(|_| ser::Error::CorruptedData)?; Ok(PeerError { code: code, message: message, }) } } /// Only necessary so we can implement Readable and Writeable. 
Rust disallows /// implementing traits when both types are outside of this crate (which is the /// case for SocketAddr and Readable/Writeable). #[derive(Debug)] pub struct SockAddr(pub SocketAddr); impl Writeable for SockAddr { fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error>
} impl Readable for SockAddr { fn read(reader: &mut dyn Reader) -> Result<SockAddr, ser::Error> { let v4_or_v6 = reader.read_u8()?; if v4_or_v6 == 0 { let ip = reader.read_fixed_bytes(4)?; let port = reader.read_u16()?; Ok(SockAddr(SocketAddr::V4(SocketAddrV4::new( Ipv4Addr::new(ip[0], ip[1], ip[2], ip[3]), port, )))) } else { let ip = try_iter_map_vec!(0..8, |_| reader.read_u16()); let port = reader.read_u16()?; Ok(SockAddr(SocketAddr::V6(SocketAddrV6::new( Ipv6Addr::new(ip[0], ip[1], ip[2], ip[3], ip[4], ip[5], ip[6], ip[7]), port, 0, 0, )))) } } } /// Serializable wrapper for the block locator. #[derive(Debug)] pub struct Locator { pub hashes: Vec<Hash>, } impl Writeable for Locator { fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> { writer.write_u8(self.hashes.len() as u8)?; for h in &self.hashes { h.write(writer)? } Ok(()) } } impl Readable for Locator { fn read(reader: &mut dyn Reader) -> Result<Locator, ser::Error> { let len = reader.read_u8()?; if len > (MAX_LOCATORS as u8) { return Err(ser::Error::TooLargeReadErr); } let mut hashes = Vec::with_capacity(len as usize); for _ in 0..len { hashes.push(Hash::read(reader)?); } Ok(Locator { hashes: hashes }) } } /// Serializable wrapper for a list of block headers. pub struct Headers { pub headers: Vec<BlockHeader>, } impl Writeable for Headers { fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> { writer.write_u16(self.headers.len() as u16)?; for h in &self.headers { h.write(writer)? } Ok(()) } } pub struct Ping { /// total difficulty accumulated by the sender, used to check whether sync /// may be needed pub total_difficulty: Difficulty, /// total height pub height: u64, } impl Writeable for Ping { fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> { self.total_difficulty.write(writer).unwrap(); self.height.write(writer).unwrap(); Ok(()) } } impl Readable for Ping { fn read(reader: &mut dyn Reader) -> Result<Ping, ser::Error> { let total_difficulty = Difficulty::read(reader)?; let height = reader.read_u64()?; Ok(Ping { total_difficulty, height, }) } } pub struct Pong { /// total difficulty accumulated by the sender, used to check whether sync /// may be needed pub total_difficulty: Difficulty, /// height accumulated by sender pub height: u64, } impl Writeable for Pong { fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> { self.total_difficulty.write(writer).unwrap(); self.height.write(writer).unwrap(); Ok(()) } } impl Readable for Pong { fn read(reader: &mut dyn Reader) -> Result<Pong, ser::Error> { let total_difficulty = Difficulty::read(reader)?; let height = reader.read_u64()?; Ok(Pong { total_difficulty, height, }) } } #[derive(Debug)] pub struct BanReason { /// the reason for the ban pub ban_reason: ReasonForBan, } impl Writeable for BanReason { fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> { let ban_reason_i32 = self.ban_reason as i32; ban_reason_i32.write(writer).unwrap(); Ok(()) } } impl Readable for BanReason { fn read(reader: &mut dyn Reader) -> Result<BanReason, ser::Error> { let ban_reason_i32 = match reader.read_i32() { Ok(h) => h, Err(_) => 0, }; let ban_reason = ReasonForBan::from_i32(ban_reason_i32).ok_or(ser::Error::CorruptedData)?; Ok(BanReason { ban_reason }) } } /// Request to get an archive of the full txhashset store, required to sync /// a new node. 
pub struct TxHashSetRequest { /// Hash of the block for which the txhashset should be provided pub hash: Hash, /// Height of the corresponding block pub height: u64, } impl Writeable for TxHashSetRequest { fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> { self.hash.write(writer)?; writer.write_u64(self.height)?; Ok(()) } } impl Readable for TxHashSetRequest { fn read(reader: &mut dyn Reader) -> Result<TxHashSetRequest, ser::Error> { Ok(TxHashSetRequest { hash: Hash::read(reader)?, height: reader.read_u64()?, }) } } /// Response to a txhashset archive request, must include a zip stream of the /// archive after the message body. pub struct TxHashSetArchive { /// Hash of the block for which the txhashset are provided pub hash: Hash, /// Height of the corresponding block pub height: u64, /// Size in bytes of the archive pub bytes: u64, } impl Writeable for TxHashSetArchive { fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> { self.hash.write(writer)?; ser_multiwrite!(writer, [write_u64, self.height], [write_u64, self.bytes]); Ok(()) } } impl Readable for TxHashSetArchive { fn read(reader: &mut dyn Reader) -> Result<TxHashSetArchive, ser::Error> { let hash = Hash::read(reader)?; let (height, bytes) = ser_multiread!(reader, read_u64, read_u64); Ok(TxHashSetArchive { hash, height, bytes, }) } }
{ match self.0 { SocketAddr::V4(sav4) => { ser_multiwrite!( writer, [write_u8, 0], [write_fixed_bytes, &sav4.ip().octets().to_vec()], [write_u16, sav4.port()] ); } SocketAddr::V6(sav6) => { writer.write_u8(1)?; for seg in &sav6.ip().segments() { writer.write_u16(*seg)?; } writer.write_u16(sav6.port())?; } } Ok(()) }
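# A hedged sketch of the 11-byte wire header (MsgHeader::LEN = 1 + 1 + 1 + 8)
# in Python: two magic bytes, a one-byte message type, and a u64 length.
# Big-endian integers are an assumption based on grin-style serialization;
# the magic and type values come from the constants above.
import struct

MAINNET_MAGIC = (97, 61)
PING = 3

def pack_header(msg_type: int, msg_len: int, magic=MAINNET_MAGIC) -> bytes:
    return struct.pack(">BBBQ", magic[0], magic[1], msg_type, msg_len)

def unpack_header(buf: bytes, magic=MAINNET_MAGIC):
    m0, m1, msg_type, msg_len = struct.unpack(">BBBQ", buf)
    if (m0, m1) != magic:
        raise ValueError("wrong network magic")  # CorruptedData analogue
    return msg_type, msg_len

hdr = pack_header(PING, 16)
assert len(hdr) == 11
assert unpack_header(hdr) == (PING, 16)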
power.go
package powercycle import ( "fmt" "os"
"github.com/hpcng/warewulf/internal/pkg/wwlog" "github.com/spf13/cobra" ) func CobraRunE(cmd *cobra.Command, args []string) error { var returnErr error = nil var nodeList []node.NodeInfo n, err := node.New() if err != nil { wwlog.Printf(wwlog.ERROR, "Could not open node configuration: %s\n", err) os.Exit(1) } if len(args) >= 1 { nodeList, _ = n.SearchByNameList(args) } else { wwlog.Printf(wwlog.ERROR, "No requested nodes\n") os.Exit(255) } if len(nodeList) == 0 { wwlog.Printf(wwlog.ERROR, "No nodes found matching: '%s'\n", args[0]) os.Exit(255) } batchpool := batch.New(50) jobcount := len(nodeList) results := make(chan power.IPMI, jobcount) for _, node := range nodeList { if node.IpmiIpaddr.Get() == "" { wwlog.Printf(wwlog.ERROR, "%s: No IPMI IP address\n", node.Id.Get()) continue } ipmiCmd := power.IPMI{ NodeName: node.Id.Get(), HostName: node.IpmiIpaddr.Get(), User: node.IpmiUserName.Get(), Password: node.IpmiPassword.Get(), AuthType: "MD5", } batchpool.Submit(func() { ipmiCmd.PowerCycle() results <- ipmiCmd }) } batchpool.Run() close(results) for result := range results { out, err := result.Result() if err != nil { wwlog.Printf(wwlog.ERROR, "%s: %s\n", result.NodeName, out) returnErr = err continue } fmt.Printf("%s: %s\n", result.NodeName, out) } return returnErr }
"github.com/hpcng/warewulf/internal/pkg/batch" "github.com/hpcng/warewulf/internal/pkg/node" "github.com/hpcng/warewulf/internal/pkg/power"
RunInScope.ts
import * as path from "path"; import * as brs from "../"; import { Callable, StdlibArgument, ValueKind, BrsString, BrsInvalid, BrsType, RoArray, isBrsString, } from "../brsTypes"; import { BrsComponent } from "../brsTypes/components/BrsComponent"; import { Interpreter } from "../interpreter"; import { getVolumeByPath, getPath } from "../stdlib/File"; import { Scope } from "../interpreter/Environment"; /** * Runs a file (or set of files) **in the current global + module scope** with the provided arguments, returning either * the value returned by those files' `main` function or `invalid` if an error occurs. * * @param interpreter the interpreter hosting this call to `Run` * @param filenames a list of files to lex, parse, and run * @param args the arguments to pass into the found `main` function * * @returns the value returned by the executed file(s) if no errors are detected, otherwise `invalid` */ function
(interpreter: Interpreter, filenames: BrsString[], args: BrsType[]) { let volumes = filenames.map(filename => getVolumeByPath(interpreter, filename.value)); let pathsToFiles = filenames.map(filename => path.join(interpreter.options.root, getPath(filename.value)) ); // if the file-to-run doesn't exist, RBI returns invalid if (!volumes.every(volume => volume != null)) { return BrsInvalid.Instance; } let ast = brs.lexParseSync(pathsToFiles, interpreter.options); return interpreter.inSubEnv(subInterpreter => { // remove the original `main` function so we can execute the new file subInterpreter.environment.remove("main", Scope.Module); return subInterpreter.exec(ast, ...args)[0] || BrsInvalid.Instance; }); } export const RunInScope = new Callable( "RunInScope", ...Callable.variadic({ signature: { args: [new StdlibArgument("filename", ValueKind.String)], returns: ValueKind.Dynamic, }, impl: (interpreter: Interpreter, filename: BrsString, ...args: BrsType[]) => { return runFilesInScope(interpreter, [filename], args); }, }), ...Callable.variadic({ signature: { args: [new StdlibArgument("filenamearray", ValueKind.Object)], returns: ValueKind.Dynamic, }, impl: (interpreter: Interpreter, filenamearray: BrsComponent, ...args: BrsType[]) => { if ( filenamearray instanceof RoArray && filenamearray.getElements().every(isBrsString) ) { return runFilesInScope( interpreter, filenamearray.getElements() as BrsString[], args ); } // RBI seems to hard-reboot when passed a non-empty associative array, but returns invalid for empty // AA's. Let's return invalid to be safe. return BrsInvalid.Instance; }, }) );
runFilesInScope
bitfield_gdt.rs
#![feature(const_convert)] #![feature(const_option)] #![feature(const_mut_refs)] #![feature(const_trait_impl)] use const_bitfield::bitfield; use const_enum::ConstEnum; const KERNEL_CODE64: u64 = 0x00AF9B000000FFFF; const KERNEL_CODE32: u64 = 0x00CF9B000000FFFF; const KERNEL_DATA: u64 = 0x00CF93000000FFFF; #[derive(Copy, Clone, Debug, Eq, PartialEq)] #[repr(u8)] pub enum DescriptorType { SystemSegment = 0, UserSegment = 1, } impl const From<bool> for DescriptorType { fn from(value: bool) -> Self { match value { false => DescriptorType::SystemSegment, true => DescriptorType::UserSegment, } } } impl const From<DescriptorType> for bool { fn from(value: DescriptorType) -> Self { match value { DescriptorType::SystemSegment => false, DescriptorType::UserSegment => true, } } } #[derive(Copy, Clone, Debug, Eq, PartialEq, ConstEnum)] #[repr(u8)] pub enum SegmentType { DataReadOnly = 0b000, DataReadWrite = 0b001, DataReadOnlyDown = 0b010, DataReadWriteDown = 0b011, CodeExecOnly = 0b100, CodeExecRead = 0b101, CodeExecOnlyConforming = 0b110, CodeExecReadConforming = 0b111, } bitfield! { #[derive(Copy, Clone)] pub struct Descriptor(u64); u32, limit_0_15, set_limit_0_15: 15, 0; u32, base_0_15, set_base_0_15: 31, 16; u8, base_16_23, set_base_16_23: 39, 32; bool, accessed, set_accessed: 40; u8, from into SegmentType, segment_type, set_segment_type: 43, 41; bool, from into DescriptorType, descriptor_type, set_descriptor_type: 44; u8, privilege_level, set_privilege_level: 46, 45; bool, present, set_present: 47; u8, limit_16_19, set_limit_16_19: 51, 48; bool, long_mode, set_long_mode: 53; bool, size_flag, set_size_flag: 54; bool, granularity, set_granularity: 55; u8, base_24_31, set_base_24_31: 63, 56; } #[test] pub fn test_parse_kernel_code64() { let descriptor = Descriptor(KERNEL_CODE64); // base assert_eq!(descriptor.base_0_15(), 0); assert_eq!(descriptor.base_16_23(), 0); assert_eq!(descriptor.base_24_31(), 0); // limit assert_eq!(descriptor.limit_0_15(), 0xFFFF); assert_eq!(descriptor.limit_16_19(), 0xF); // flags assert_eq!(descriptor.accessed(), true); assert_eq!(descriptor.segment_type(), SegmentType::CodeExecRead); assert_eq!(descriptor.descriptor_type(), DescriptorType::UserSegment); assert_eq!(descriptor.privilege_level(), 0); assert_eq!(descriptor.present(), true); assert_eq!(descriptor.long_mode(), true); assert_eq!(descriptor.size_flag(), false); assert_eq!(descriptor.granularity(), true); } #[test] pub fn test_build_kernel_code64() { let mut descriptor = Descriptor(0); descriptor // base .set_base_0_15(0) .set_base_16_23(0) .set_base_24_31(0) // limit .set_limit_0_15(0xFFFF) .set_limit_16_19(0xF) // flags .set_accessed(true) .set_segment_type(SegmentType::CodeExecRead) .set_descriptor_type(DescriptorType::UserSegment) .set_privilege_level(0) .set_present(true) .set_long_mode(true) .set_size_flag(false) .set_granularity(true); assert_eq!(descriptor.0, KERNEL_CODE64); } #[test] pub fn test_parse_kernel_code32() { let descriptor = Descriptor(KERNEL_CODE32); // base assert_eq!(descriptor.base_0_15(), 0); assert_eq!(descriptor.base_16_23(), 0); assert_eq!(descriptor.base_24_31(), 0); // limit assert_eq!(descriptor.limit_0_15(), 0xFFFF); assert_eq!(descriptor.limit_16_19(), 0xF); // flags assert_eq!(descriptor.accessed(), true); assert_eq!(descriptor.segment_type(), SegmentType::CodeExecRead); assert_eq!(descriptor.descriptor_type(), DescriptorType::UserSegment); assert_eq!(descriptor.privilege_level(), 0); assert_eq!(descriptor.present(), true); assert_eq!(descriptor.long_mode(), false); 
assert_eq!(descriptor.size_flag(), true); assert_eq!(descriptor.granularity(), true); } #[test] pub fn test_build_kernel_code32() { let mut descriptor = Descriptor(0); descriptor // base .set_base_0_15(0) .set_base_16_23(0) .set_base_24_31(0) // limit .set_limit_0_15(0xFFFF) .set_limit_16_19(0xF) // flags .set_accessed(true) .set_segment_type(SegmentType::CodeExecRead) .set_descriptor_type(DescriptorType::UserSegment) .set_privilege_level(0) .set_present(true) .set_long_mode(false) .set_size_flag(true) .set_granularity(true); assert_eq!(descriptor.0, KERNEL_CODE32); } #[test] pub fn test_parse_kernel_data() { let descriptor = Descriptor(KERNEL_DATA); // base assert_eq!(descriptor.base_0_15(), 0); assert_eq!(descriptor.base_16_23(), 0); assert_eq!(descriptor.base_24_31(), 0); // limit assert_eq!(descriptor.limit_0_15(), 0xFFFF); assert_eq!(descriptor.limit_16_19(), 0xF); // flags assert_eq!(descriptor.accessed(), true); assert_eq!(descriptor.segment_type(), SegmentType::DataReadWrite); assert_eq!(descriptor.descriptor_type(), DescriptorType::UserSegment); assert_eq!(descriptor.privilege_level(), 0); assert_eq!(descriptor.present(), true); assert_eq!(descriptor.long_mode(), false); assert_eq!(descriptor.size_flag(), true); assert_eq!(descriptor.granularity(), true); } #[test] pub fn test_build_kernel_data() { let mut descriptor = Descriptor(0); descriptor // base .set_base_0_15(0) .set_base_16_23(0) .set_base_24_31(0) // limit .set_limit_0_15(0xFFFF) .set_limit_16_19(0xF) // flags .set_accessed(true) .set_segment_type(SegmentType::DataReadWrite) .set_descriptor_type(DescriptorType::UserSegment) .set_privilege_level(0) .set_present(true) .set_long_mode(false) .set_size_flag(true)
.set_granularity(true); assert_eq!(descriptor.0, KERNEL_DATA); }
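As a cross-check of the bitfield layout above, the same fields can be decoded from KERNEL_CODE64 with plain shifts and masks. A minimal std-only Rust sketch (no const_bitfield crate needed); the bit positions match the field ranges declared in the bitfield! block:

const KERNEL_CODE64: u64 = 0x00AF9B000000FFFF;

fn main() {
    let d = KERNEL_CODE64;
    // 20-bit limit: bits 0..=15 plus bits 48..=51
    let limit = (d & 0xFFFF) | (((d >> 48) & 0xF) << 16);
    // 32-bit base: bits 16..=39 plus bits 56..=63
    let base = ((d >> 16) & 0x00FF_FFFF) | (((d >> 56) & 0xFF) << 24);
    // access byte (bits 40..=47): present, DPL, S bit, segment type, accessed
    let access = (d >> 40) & 0xFF;
    let long_mode = (d >> 53) & 1; // L flag
    let granularity = (d >> 55) & 1; // G flag: limit counted in 4 KiB pages
    assert_eq!(limit, 0xF_FFFF);
    assert_eq!(base, 0);
    assert_eq!(access, 0x9B); // present, ring 0, user segment, code exec/read, accessed
    assert_eq!(long_mode, 1);
    assert_eq!(granularity, 1);
}

These are the same values the test_parse_kernel_code64 test asserts through the generated accessors.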
key_id_impl.rs
use crate::software_vault::SoftwareVault; use crate::VaultError; use ockam_vault_core::{Hasher, KeyId, KeyIdVault, PublicKey, Secret}; impl KeyIdVault for SoftwareVault { fn get_secret_by_key_id(&self, key_id: &str) -> ockam_core::Result<Secret> { let index = self .entries .iter() .find(|(_, entry)| { if let Some(e_key_id) = entry.key_id() { e_key_id == key_id } else { false } }) .ok_or_else(|| Into::<ockam_core::Error>::into(VaultError::SecretNotFound))? .0; Ok(Secret::new(*index)) } fn compute_key_id_for_public_key(&self, public_key: &PublicKey) -> ockam_core::Result<KeyId> { let key_id = self.sha256(public_key.as_ref())?; Ok(hex::encode(key_id)) } } #[cfg(test)] mod tests { use crate::SoftwareVault; use ockam_vault_core::{ KeyIdVault, PublicKey, SecretAttributes, SecretPersistence, SecretType, SecretVault, CURVE25519_SECRET_LENGTH, }; #[test] fn compute_key_id_for_public_key() { let vault = SoftwareVault::new(); let public = hex::decode("68858ea1ea4e1ade755df7fb6904056b291d9781eb5489932f46e32f12dd192a") .unwrap(); let public = PublicKey::new(public.to_vec()); let key_id = vault.compute_key_id_for_public_key(&public).unwrap(); assert_eq!( key_id, "732af49a0b47c820c0a4cac428d6cb80c1fa70622f4a51708163dd87931bc942" ); } #[test] fn
() { let mut vault = SoftwareVault::new(); let attributes = SecretAttributes::new( SecretType::Curve25519, SecretPersistence::Ephemeral, CURVE25519_SECRET_LENGTH, ); let secret = vault.secret_generate(attributes).unwrap(); let public = vault.secret_public_key_get(&secret).unwrap(); let key_id = vault.compute_key_id_for_public_key(&public).unwrap(); let secret2 = vault.get_secret_by_key_id(&key_id).unwrap(); assert_eq!(secret.index(), secret2.index()); } }
get_secret_by_key_id
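For reference, the key id here is just the hex-encoded SHA-256 of the raw public key bytes. A standalone sketch, assuming the third-party sha2 and hex crates stand in for the vault's own Hasher trait (the fixture values are taken from the test above):

use sha2::{Digest, Sha256};

fn compute_key_id(public_key: &[u8]) -> String {
    // SHA-256 of the raw key bytes, hex-encoded — mirrors
    // compute_key_id_for_public_key, minus the vault indirection.
    hex::encode(Sha256::digest(public_key))
}

fn main() {
    // same fixture as compute_key_id_for_public_key's test
    let public =
        hex::decode("68858ea1ea4e1ade755df7fb6904056b291d9781eb5489932f46e32f12dd192a").unwrap();
    assert_eq!(
        compute_key_id(&public),
        "732af49a0b47c820c0a4cac428d6cb80c1fa70622f4a51708163dd87931bc942"
    );
}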
test_image_load_save.py
# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## # # See COPYING file distributed along with the NiBabel package for the # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## ''' Tests for loader function ''' from __future__ import division, print_function, absolute_import from io import BytesIO import shutil from os.path import dirname, join as pjoin from tempfile import mkdtemp import numpy as np from .. import analyze as ana from .. import spm99analyze as spm99 from .. import spm2analyze as spm2 from .. import nifti1 as ni1 from .. import loadsave as nils from .. import (Nifti1Image, Nifti1Header, Nifti1Pair, Nifti2Image, Nifti2Pair, Minc1Image, Minc2Image, Spm2AnalyzeImage, Spm99AnalyzeImage, AnalyzeImage, MGHImage, all_image_classes) from ..tmpdirs import InTemporaryDirectory from ..volumeutils import native_code, swapped_code from ..optpkg import optional_package from ..spatialimages import SpatialImage from numpy.testing import assert_array_equal, assert_array_almost_equal from nose.tools import assert_true, assert_equal, assert_raises _, have_scipy, _ = optional_package('scipy') # No scipy=>no SPM-format writing DATA_PATH = pjoin(dirname(__file__), 'data') MGH_DATA_PATH = pjoin(dirname(__file__), '..', 'freesurfer', 'tests', 'data') def round_trip(img): # round trip a nifti single sio = BytesIO() img.file_map['image'].fileobj = sio img.to_file_map() img2 = Nifti1Image.from_file_map(img.file_map) return img2 def test_conversion_spatialimages(): shape = (2, 4, 6) affine = np.diag([1, 2, 3, 1]) klasses = [klass for klass in all_image_classes if klass.rw and issubclass(klass, SpatialImage)] for npt in np.float32, np.int16: data = np.arange(np.prod(shape), dtype=npt).reshape(shape) for r_class in klasses: if not r_class.makeable: continue img = r_class(data, affine) img.set_data_dtype(npt) for w_class in klasses: if not w_class.makeable: continue img2 = w_class.from_image(img) assert_array_equal(img2.get_data(), data) assert_array_equal(img2.affine, affine) def test_save_load_endian(): shape = (2, 4, 6) affine = np.diag([1, 2, 3, 1]) data = np.arange(np.prod(shape), dtype='f4').reshape(shape) # Native endian image img = Nifti1Image(data, affine) assert_equal(img.header.endianness, native_code) img2 = round_trip(img) assert_equal(img2.header.endianness, native_code) assert_array_equal(img2.get_data(), data) # byte swapped endian image bs_hdr = img.header.as_byteswapped() bs_img = Nifti1Image(data, affine, bs_hdr) assert_equal(bs_img.header.endianness, swapped_code) # of course the data is the same because it's not written to disk assert_array_equal(bs_img.get_data(), data) # Check converting to another image cbs_img = AnalyzeImage.from_image(bs_img) # this will make the header native by doing the header conversion cbs_hdr = cbs_img.header assert_equal(cbs_hdr.endianness, native_code) # and the byte order follows it back into another image cbs_img2 = Nifti1Image.from_image(cbs_img) cbs_hdr2 = cbs_img2.header assert_equal(cbs_hdr2.endianness, native_code) # Try byteswapped round trip bs_img2 = round_trip(bs_img) bs_data2 = bs_img2.get_data() # now the data dtype was swapped endian, so the read data is too assert_equal(bs_data2.dtype.byteorder, swapped_code) assert_equal(bs_img2.header.endianness, swapped_code) assert_array_equal(bs_data2, data) # Now mix up byteswapped data and 
non-byteswapped header
    mixed_img = Nifti1Image(bs_data2, affine)
    assert_equal(mixed_img.header.endianness, native_code)
    m_img2 = round_trip(mixed_img)
    assert_equal(m_img2.header.endianness, native_code)
    assert_array_equal(m_img2.get_data(), data)


def test_save_load():
    shape = (2, 4, 6)
    npt = np.float32
    data = np.arange(np.prod(shape), dtype=npt).reshape(shape)
    affine = np.diag([1, 2, 3, 1])
    affine[:3, 3] = [3, 2, 1]
    img = ni1.Nifti1Image(data, affine)
    img.set_data_dtype(npt)
    with InTemporaryDirectory() as pth:
        nifn = 'an_image.nii'
        sifn = 'another_image.img'
        ni1.save(img, nifn)
        re_img = nils.load(nifn)
        assert_true(isinstance(re_img, ni1.Nifti1Image))
        assert_array_equal(re_img.get_data(), data)
        assert_array_equal(re_img.affine, affine)
        # These and subsequent del statements are to prevent confusing
        # windows errors when trying to open files or delete the
        # temporary directory.
        del re_img
        if have_scipy:  # skip if we cannot read .mat files
            spm2.save(img, sifn)
            re_img2 = nils.load(sifn)
            assert_true(isinstance(re_img2, spm2.Spm2AnalyzeImage))
            assert_array_equal(re_img2.get_data(), data)
            assert_array_equal(re_img2.affine, affine)
            del re_img2
            spm99.save(img, sifn)
            re_img3 = nils.load(sifn)
            assert_true(isinstance(re_img3, spm99.Spm99AnalyzeImage))
            assert_array_equal(re_img3.get_data(), data)
            assert_array_equal(re_img3.affine, affine)
            ni1.save(re_img3, nifn)
            del re_img3
            re_img = nils.load(nifn)
            assert_true(isinstance(re_img, ni1.Nifti1Image))
            assert_array_equal(re_img.get_data(), data)
            assert_array_equal(re_img.affine, affine)
            del re_img


def test_two_to_one():
    # test going from two to one file in save
    shape = (2, 4, 6)
    npt = np.float32
    data = np.arange(np.prod(shape), dtype=npt).reshape(shape)
    affine = np.diag([1, 2, 3, 1])
    affine[:3, 3] = [3, 2, 1]
    # single file format
    img = ni1.Nifti1Image(data, affine)
    assert_equal(img.header['magic'], b'n+1')
    str_io = BytesIO()
    img.file_map['image'].fileobj = str_io
    # check that the single format vox offset stays at zero
    img.to_file_map()
    assert_equal(img.header['magic'], b'n+1')
    assert_equal(img.header['vox_offset'], 0)
    # make a new pair image, with the single image header
    pimg = ni1.Nifti1Pair(data, affine, img.header)
    isio = BytesIO()
    hsio = BytesIO()
    pimg.file_map['image'].fileobj = isio
    pimg.file_map['header'].fileobj = hsio
    pimg.to_file_map()
    # the offset stays at zero (but is 352 on disk)
    assert_equal(pimg.header['magic'], b'ni1')
    assert_equal(pimg.header['vox_offset'], 0)
    assert_array_equal(pimg.get_data(), data)
    # same for from_image, going from single image to pair format
    ana_img = ana.AnalyzeImage.from_image(img)
    assert_equal(ana_img.header['vox_offset'], 0)
    # back to the single image, save it again to a stringio
    str_io = BytesIO()
    img.file_map['image'].fileobj = str_io
    img.to_file_map()
    assert_equal(img.header['vox_offset'], 0)
    aimg = ana.AnalyzeImage.from_image(img)
    assert_equal(aimg.header['vox_offset'], 0)
    aimg = spm99.Spm99AnalyzeImage.from_image(img)
    assert_equal(aimg.header['vox_offset'], 0)
    aimg = spm2.Spm2AnalyzeImage.from_image(img)
    assert_equal(aimg.header['vox_offset'], 0)
    nfimg = ni1.Nifti1Pair.from_image(img)
    assert_equal(nfimg.header['vox_offset'], 0)
    # now set the vox offset directly
    hdr = nfimg.header
    hdr['vox_offset'] = 16
    assert_equal(nfimg.header['vox_offset'], 16)
    # check it gets properly set by the nifti single image
    nfimg = ni1.Nifti1Image.from_image(img)
    assert_equal(nfimg.header['vox_offset'], 0)


def test_negative_load_save():
    shape = (1, 2, 5)
    data = np.arange(10).reshape(shape) - 10.0
    affine = np.eye(4)
    hdr = 
ni1.Nifti1Header() hdr.set_data_dtype(np.int16) img = Nifti1Image(data, affine, hdr) str_io = BytesIO() img.file_map['image'].fileobj = str_io img.to_file_map() str_io.seek(0) re_img = Nifti1Image.from_file_map(img.file_map) assert_array_almost_equal(re_img.get_data(), data, 4) def test_filename_save(): # This is to test the logic in the load and save routines, relating # extensions to filetypes # Tuples of class, ext, loadedclass
def test_analyze_detection(): # Test detection of Analyze, Nifti1 and Nifti2 # Algorithm is as described in loadsave:which_analyze_type def wat(hdr): return nils.which_analyze_type(hdr.binaryblock) n1_hdr = Nifti1Header(b'\0' * 348, check=False) assert_equal(wat(n1_hdr), None) n1_hdr['sizeof_hdr'] = 540 assert_equal(wat(n1_hdr), 'nifti2') assert_equal(wat(n1_hdr.as_byteswapped()), 'nifti2') n1_hdr['sizeof_hdr'] = 348 assert_equal(wat(n1_hdr), 'analyze') assert_equal(wat(n1_hdr.as_byteswapped()), 'analyze') n1_hdr['magic'] = b'n+1' assert_equal(wat(n1_hdr), 'nifti1') assert_equal(wat(n1_hdr.as_byteswapped()), 'nifti1') n1_hdr['magic'] = b'ni1' assert_equal(wat(n1_hdr), 'nifti1') assert_equal(wat(n1_hdr.as_byteswapped()), 'nifti1') # Doesn't matter what magic is if it's not a nifti1 magic n1_hdr['magic'] = b'ni2' assert_equal(wat(n1_hdr), 'analyze') n1_hdr['sizeof_hdr'] = 0 n1_hdr['magic'] = b'' assert_equal(wat(n1_hdr), None) n1_hdr['magic'] = 'n+1' assert_equal(wat(n1_hdr), 'nifti1') n1_hdr['magic'] = 'ni1' assert_equal(wat(n1_hdr), 'nifti1') def test_guessed_image_type(): # Test whether we can guess the image type from example files assert_equal(nils.guessed_image_type( pjoin(DATA_PATH, 'example4d.nii.gz')), Nifti1Image) assert_equal(nils.guessed_image_type( pjoin(DATA_PATH, 'nifti1.hdr')), Nifti1Pair) assert_equal(nils.guessed_image_type( pjoin(DATA_PATH, 'example_nifti2.nii.gz')), Nifti2Image) assert_equal(nils.guessed_image_type( pjoin(DATA_PATH, 'nifti2.hdr')), Nifti2Pair) assert_equal(nils.guessed_image_type( pjoin(DATA_PATH, 'tiny.mnc')), Minc1Image) assert_equal(nils.guessed_image_type( pjoin(DATA_PATH, 'small.mnc')), Minc2Image) assert_equal(nils.guessed_image_type( pjoin(DATA_PATH, 'test.mgz')), MGHImage) assert_equal(nils.guessed_image_type( pjoin(DATA_PATH, 'analyze.hdr')), Spm2AnalyzeImage) def test_fail_save(): with InTemporaryDirectory(): dataobj = np.ones((10, 10, 10), dtype=np.float16) affine = np.eye(4, dtype=np.float32) img = SpatialImage(dataobj, affine) # Fails because float16 is not supported. with assert_raises(AttributeError): nils.save(img, 'foo.nii.gz') del img
inklass_ext_loadklasses = ( (Nifti1Image, '.nii', Nifti1Image), (Nifti2Image, '.nii', Nifti2Image), (Nifti1Pair, '.nii', Nifti1Image), (Nifti2Pair, '.nii', Nifti2Image), (Nifti1Image, '.img', Nifti1Pair), (Nifti2Image, '.img', Nifti2Pair), (Nifti1Pair, '.img', Nifti1Pair), (Nifti2Pair, '.img', Nifti2Pair), (Nifti1Image, '.hdr', Nifti1Pair), (Nifti2Image, '.hdr', Nifti2Pair), (Nifti1Pair, '.hdr', Nifti1Pair), (Nifti2Pair, '.hdr', Nifti2Pair), (Minc1Image, '.nii', Nifti1Image), (Minc1Image, '.img', Nifti1Pair), (Spm2AnalyzeImage, '.nii', Nifti1Image), (Spm2AnalyzeImage, '.img', Spm2AnalyzeImage), (Spm99AnalyzeImage, '.nii', Nifti1Image), (Spm99AnalyzeImage, '.img', Spm2AnalyzeImage), (AnalyzeImage, '.nii', Nifti1Image), (AnalyzeImage, '.img', Spm2AnalyzeImage), ) shape = (2, 4, 6) affine = np.diag([1, 2, 3, 1]) data = np.arange(np.prod(shape), dtype='f4').reshape(shape) for inklass, out_ext, loadklass in inklass_ext_loadklasses: if not have_scipy: # We can't load a SPM analyze type without scipy. These types have # a 'mat' file (the type we can't load) if ('mat', '.mat') in loadklass.files_types: continue img = inklass(data, affine) try: pth = mkdtemp() fname = pjoin(pth, 'image' + out_ext) nils.save(img, fname) rt_img = nils.load(fname) assert_array_almost_equal(rt_img.get_data(), data) assert_true(type(rt_img) is loadklass) # delete image to allow file close. Otherwise windows # raises an error when trying to delete the directory del rt_img finally: shutil.rmtree(pth)
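The endian tests in this file hinge on one fact: the same value has two on-disk byte orders, and only decoding with the matching order recovers it. A minimal std-only Rust illustration of that round trip (language chosen for consistency with the other sketches in this collection):

fn main() {
    let x: f32 = 1234.5;
    let le = x.to_le_bytes(); // little-endian on-disk form
    let be = x.to_be_bytes(); // big-endian (byte-swapped) on-disk form
    assert_ne!(le, be);
    // decoding with the matching byte order recovers the value...
    assert_eq!(f32::from_le_bytes(le), x);
    assert_eq!(f32::from_be_bytes(be), x);
    // ...while a mismatched header endianness would misread the bytes,
    // which is what as_byteswapped() headers guard against above
    assert_ne!(f32::from_be_bytes(le), x);
}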
ssl-test.js
// Copyright IBM Corp. 2014,2016. All Rights Reserved. // Node module: strong-soap // This file is licensed under the MIT License. // License text available at https://opensource.org/licenses/MIT "use strict"; var fs = require('fs'), soap = require('..').soap, https = require('https'), constants = require('constants'), assert = require('assert'); var test = {}; test.service = { StockQuoteService: { StockQuotePort: { GetLastTradePrice: function(args) { if (args.tickerSymbol === 'trigger error') { throw new Error('triggered server error'); } else { return {TradePrice: {price: 19.56}}; } } } } }; test.sslOptions = { key: fs.readFileSync(__dirname + '/certs/agent2-key.pem'), cert: fs.readFileSync(__dirname + '/certs/agent2-cert.pem') }; describe('SOAP Client(SSL)', function() { before(function(done) { fs.readFile(__dirname + '/wsdl/strict/stockquote.wsdl', 'utf8', function(err, data) {
}); }); beforeEach(function(done) { test.server = https.createServer(test.sslOptions, function(req, res) { res.statusCode = 404; res.end(); }).listen(51515, function() { test.soapServer = soap.listen(test.server, '/stockquote', test.service, test.wsdl); test.baseUrl = 'https://' + test.server.address().address + ':' + test.server.address().port; if (test.server.address().address === '0.0.0.0' || test.server.address().address === '::') { test.baseUrl = 'https://127.0.0.1:' + test.server.address().port; } done(); }); }); afterEach(function(done) { test.server.close(function() { test.server = null; delete test.soapServer; test.soapServer = null; done(); }); }); it('should connect to an SSL server', function(done) { soap.createClient(__dirname + '/wsdl/strict/stockquote.wsdl', function(err, client) { assert.ok(!err); client.setEndpoint(test.baseUrl + '/stockquote'); client.setSecurity({ addOptions: function(options) { options.cert = test.sslOptions.cert, options.key = test.sslOptions.key, options.rejectUnauthorized = false; options.secureOptions = constants.SSL_OP_NO_TLSv1_2; options.strictSSL = false; options.agent = new https.Agent(options); }, toXML: function() { return ''; } }); client.GetLastTradePrice({TradePriceRequest: {tickerSymbol: 'AAPL'}}, function(err, result) { assert.ok(!err); assert.equal(19.56, parseFloat(result.price)); done(); }); }); }); });
assert.ok(!err); test.wsdl = data; done();
GranuleResultsHighlights.js
import React from 'react' import PropTypes from 'prop-types' import { FaMap } from 'react-icons/fa'
import { locationPropType } from '../../util/propTypes/location' import PortalLinkContainer from '../../containers/PortalLinkContainer/PortalLinkContainer' import Skeleton from '../Skeleton/Skeleton' import EDSCIcon from '../EDSCIcon/EDSCIcon' import './GranuleResultsHighlights.scss' const granuleListItemSkeletonStyle = { height: '99px' } const granuleListTotalStyle = { height: '18px' } export const GranuleResultsHighlights = ({ granuleCount, granules, isLoaded, isLoading, location, visibleGranules }) => ( <div className="granule-results-highlights"> <div className="granule-results-highlights__count"> { (!isLoaded) && ( <Skeleton shapes={granuleListTotal} containerStyle={granuleListTotalStyle} variant="dark" /> ) } { (isLoaded && !isLoading) && ( `Showing ${commafy(visibleGranules)} of ${commafy( granuleCount )} matching ${pluralize('granule', granuleCount)}` ) } </div> <ul className="granule-results-highlights__list"> { (!isLoaded) && ( <> { [1, 2, 3].map((item, i) => { const key = `granule_loader_${i}` return ( <Skeleton key={key} className="granule-results-highlights__item" containerStyle={granuleListItemSkeletonStyle} shapes={granuleListItem} variant="dark" /> ) }) } </> ) } { (isLoaded && !isLoading) && ( <> { granules.map((granule, i) => { const { id, title, formattedTemporal } = granule const [ timeStart, timeEnd ] = formattedTemporal const key = `${id}_${i}` return ( <li key={key} className="granule-results-highlights__item"> <header className="granule-results-highlights__item-header"> <h4 className="granule-results-highlights__item-title">{title}</h4> </header> <div className="granule-results-highlights__item-body"> <div className="granule-results-highlights__temporal-row"> <h5 className="granule-results-highlights__temporal-label"> Start </h5> <p className="granule-results-highlights__temporal-value">{timeStart}</p> </div> <div className="granule-results-highlights__temporal-row"> <h5 className="granule-results-highlights__temporal-label"> End </h5> <p className="granule-results-highlights__temporal-value">{timeEnd}</p> </div> </div> </li> ) }) } </> ) } </ul> <div className="granule-results-highlights__footer"> <PortalLinkContainer className="granule-results-header__title-link granule-results-header__title-link-icon" to={{ pathname: '/search/granules', search: location.search }} > <EDSCIcon icon={FaMap} /> {' View Granules'} </PortalLinkContainer> </div> </div> ) GranuleResultsHighlights.propTypes = { granuleCount: PropTypes.number.isRequired, granules: PropTypes.arrayOf(PropTypes.shape({})).isRequired, isLoaded: PropTypes.bool.isRequired, isLoading: PropTypes.bool.isRequired, location: locationPropType.isRequired, visibleGranules: PropTypes.number.isRequired } export default GranuleResultsHighlights
import { commafy } from '../../util/commafy' import { granuleListItem, granuleListTotal } from './skeleton' import { pluralize } from '../../util/pluralize'
es6.reflect.get.js
// 26.1.6 Reflect.get(target, propertyKey [, receiver]) var $ = require('./$') , has = require('./$.has') , $export = require('./$.export') , isObject = require('./$.is-object') , anObject = require('./$.an-object'); function
(target, propertyKey/*, receiver*/) { var receiver = arguments.length < 3 ? target : arguments[2] , desc, proto; if (anObject(target) === receiver) return target[propertyKey]; if (desc = $.getDesc(target, propertyKey)) return has(desc, 'value') ? desc.value : desc.get !== undefined ? desc.get.call(receiver) : undefined; if (isObject(proto = $.getProto(target))) return get(proto, propertyKey, receiver); } $export($export.S, 'Reflect', {get: get});
get
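The polyfill resolves a property by checking the target's own descriptor first (calling a getter with the original receiver), and otherwise recursing into the prototype. A hypothetical Rust model of that lookup order, with objects reduced to maps and getters to plain function pointers (all names here are invented for illustration):

use std::collections::HashMap;

enum Property {
    Value(i64),
    Getter(fn(receiver: &Object) -> i64),
}

struct Object {
    props: HashMap<&'static str, Property>,
    proto: Option<Box<Object>>,
}

fn get(target: &Object, key: &str, receiver: &Object) -> Option<i64> {
    match target.props.get(key) {
        Some(Property::Value(v)) => Some(*v),           // own data property wins
        Some(Property::Getter(g)) => Some(g(receiver)), // getter sees the original receiver
        None => target
            .proto
            .as_ref()
            .and_then(|p| get(p, key, receiver)),       // walk up the prototype chain
    }
}

fn main() {
    // a "getter" on the prototype that reads "x" off the receiver
    let proto = Object {
        props: HashMap::from([("twice", Property::Getter(|r| 2 * get(r, "x", r).unwrap_or(0)))]),
        proto: None,
    };
    let obj = Object {
        props: HashMap::from([("x", Property::Value(21))]),
        proto: Some(Box::new(proto)),
    };
    // found on the prototype, but evaluated against the receiver — like desc.get.call(receiver)
    assert_eq!(get(&obj, "twice", &obj), Some(42));
}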
lib.rs
#![cfg_attr(not(feature = "std"), no_std)]

/// Edit this file to define custom logic or remove it if it is not needed.
/// Learn more about FRAME and the core library of Substrate FRAME pallets:
/// <https://substrate.dev/docs/en/knowledgebase/runtime/frame>
pub use pallet::*;

use frame_support::{
    PalletId,
    traits::{Currency, EnsureOrigin, ReservableCurrency, OnUnbalanced, Get, ExistenceRequirement::{KeepAlive}},
    codec::{Encode, Decode}
};
use sp_std::{vec, vec::Vec, convert::{TryInto}};
use sp_runtime::traits::{Hash, AccountIdConversion};

#[cfg(test)]
mod mock;

#[cfg(test)]
mod tests;

#[cfg(feature = "runtime-benchmarks")]
mod benchmarking;

#[derive(Encode, Decode, Default, Clone, PartialEq)]
pub struct Project<AccountId> {
    pub total_votes: u128,
    pub grants: u128,
    pub support_area: u128,
    pub withdrew: u128,
    pub name: Vec<u8>,
    pub owner: AccountId,
}

#[derive(Encode, Decode, Default, Clone, PartialEq)]
pub struct Round {
    pub ongoing: bool,
    pub support_pool: u128,
    pub pre_tax_support_pool: u128,
    pub total_support_area: u128,
    pub total_tax: u128,
}

type ProjectOf<T> = Project<<T as frame_system::Config>::AccountId>;
type BalanceOf<T> = <<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::Balance;
type NegativeImbalanceOf<T> = <<T as Config>::Currency as Currency<<T as frame_system::Config>::AccountId>>::NegativeImbalance;

#[frame_support::pallet]
pub mod pallet {
    use frame_support::{dispatch::DispatchResultWithPostInfo, pallet_prelude::*};
    use frame_system::pallet_prelude::*;
    use super::*;

    /// Configure the pallet by specifying the parameters and types on which it depends.
    #[pallet::config]
    pub trait Config: frame_system::Config {
        /// Because this pallet emits events, it depends on the runtime's definition of an event.
        type Event: From<Event<Self>> + IsType<<Self as frame_system::Config>::Event>;

        type Currency: ReservableCurrency<Self::AccountId>;

        #[pallet::constant]
        type PalletId: Get<PalletId>;

        /// Origin from which admin must come.
        type AdminOrigin: EnsureOrigin<Self::Origin>;

        /// What to do with slashed funds.
        type Slashed: OnUnbalanced<NegativeImbalanceOf<Self>>;

        /// UnitOfVote, 0.001 Unit token
        type UnitOfVote: Get<u128>;

        /// Number of base units for each vote
        type NumberOfUnitPerVote: Get<u128>;

        /// The ratio of fee based on the number of units
        type FeeRatioPerVote: Get<u128>;

        /// The minimum length of project name
        type NameMinLength: Get<usize>;

        /// The maximum length of project name
        type NameMaxLength: Get<usize>;
    }

    #[pallet::pallet]
    #[pallet::generate_store(pub(super) trait Store)]
    pub struct Pallet<T>(_);

    // The pallet's runtime storage items.
    // https://substrate.dev/docs/en/knowledgebase/runtime/storage
    #[pallet::storage]
    #[pallet::getter(fn rounds)]
    // Learn more about declaring storage items:
    // https://substrate.dev/docs/en/knowledgebase/runtime/storage#declaring-storage-items
    pub(super) type Rounds<T> = StorageMap<_, Blake2_128Concat, u32, Round, ValueQuery>;

    #[pallet::storage]
    #[pallet::getter(fn projects)]
    pub(super) type Projects<T: Config> = StorageDoubleMap<_, Blake2_128Concat, u32, Blake2_128Concat, T::Hash, ProjectOf<T>, ValueQuery>;

    #[pallet::storage]
    pub(super) type ProjectVotes<T: Config> = StorageDoubleMap<_, Blake2_128Concat, T::Hash, Blake2_128Concat, T::AccountId, u128, ValueQuery>;

    // Pallets use events to inform users when important changes are made.
    // https://substrate.dev/docs/en/knowledgebase/runtime/events
    #[pallet::event]
    #[pallet::metadata(T::AccountId = "AccountId", T::Hash = "Hash")]
    #[pallet::generate_deposit(pub(super) fn deposit_event)]
    pub enum Event<T: Config> {
        /// Event documentation should end with an array that provides descriptive names for event
        /// parameters. [project_hash, who]
        ProjectRegistered(T::Hash, T::AccountId),
        /// parameters. [project_hash, balance of cost]
        VoteCost(T::Hash, u128),
        /// parameters. [project_hash, who, number of ballots]
        VoteSucceed(T::Hash, T::AccountId, u128),
        /// parameters. [round_id]
        RoundStarted(u32),
        /// parameters. [round_id]
        RoundEnded(u32),
        /// parameters. [round_id, who, amount]
        DonateSucceed(u32, T::AccountId, u128),
    }

    // Errors inform users that something went wrong.
    #[pallet::error]
    pub enum Error<T> {
        /// Error names should be descriptive.
        NoneValue,
        /// Errors should have helpful documentation associated with them.
        StorageOverflow,
        DuplicateProject,
        ProjectNotExist,
        ProjectNameTooLong,
        ProjectNameTooShort,
        InvalidBallot,
        DonationTooSmall,
        RoundExisted,
        RoundNotExist,
        RoundHasEnded,
        DuplicateRound,
    }

    #[pallet::hooks]
    impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {}

    // Dispatchable functions allow users to interact with the pallet and invoke state changes.
    // These functions materialize as "extrinsics", which are often compared to transactions.
    // Dispatchable functions must be annotated with a weight and must return a DispatchResult.
    #[pallet::call]
    impl<T: Config> Pallet<T> {
        /// Donate to the support pool of an ongoing round; the amount minus tax is added to
        /// the round's support pool. This function must be dispatched by a signed extrinsic.
        #[pallet::weight(10_000 + T::DbWeight::get().writes(1))]
        pub fn donate(origin: OriginFor<T>, round_id: u32, #[pallet::compact] amount: BalanceOf<T>) -> DispatchResultWithPostInfo {
            // Check that the extrinsic was signed and get the signer.
            // This function will return an error if the extrinsic is not signed.
            // https://substrate.dev/docs/en/knowledgebase/runtime/origin
            let who = ensure_signed(origin)?;

            ensure!(Rounds::<T>::contains_key(&round_id), Error::<T>::RoundNotExist);
            let round = Rounds::<T>::get(round_id);
            ensure!(true == round.ongoing, Error::<T>::RoundHasEnded);

            // the minimum unit, make sure the donation is greater than this
            let min_unit_number = Self::cal_amount(1u128, false);
            let amount_number = Self::balance_to_u128(amount);
            let fee_number = T::FeeRatioPerVote::get().checked_mul(amount_number / T::NumberOfUnitPerVote::get()).unwrap();
            ensure!(amount_number > min_unit_number, Error::<T>::DonationTooSmall);

            let _ = T::Currency::transfer(&who, &Self::account_id(), amount, KeepAlive);

            // update the round
            Rounds::<T>::mutate(round_id, |rnd| {
                let ptsp = rnd.pre_tax_support_pool;
                let sp = rnd.support_pool;
                let tt = rnd.total_tax;
                rnd.pre_tax_support_pool = amount_number.checked_add(ptsp).unwrap();
                rnd.support_pool = (amount_number-fee_number).checked_add(sp).unwrap();
                rnd.total_tax = fee_number.checked_add(tt).unwrap();
            });

            Self::deposit_event(Event::DonateSucceed(round_id, who, Self::balance_to_u128(amount)));

            Ok(().into())
        }

        #[pallet::weight(10_000 + T::DbWeight::get().reads_writes(1,1))]
        pub fn start_round(origin: OriginFor<T>, round_id: u32) -> DispatchResultWithPostInfo {
            // Only admin can control the round
            T::AdminOrigin::ensure_origin(origin)?;

            ensure!(!Rounds::<T>::contains_key(&round_id), Error::<T>::RoundExisted);

            let round = Round {
                ongoing: true,
                support_pool: 0,
                pre_tax_support_pool: 0,
                total_support_area: 0,
                total_tax: 0
            };
            Rounds::<T>::insert(round_id, round);

            Self::deposit_event(Event::RoundStarted(round_id));

            Ok(().into())
        }

        /// End an `ongoing` round and distribute the funds in the sponsor pool; any invalid index or round status will cause errors
        #[pallet::weight(10_000 + T::DbWeight::get().reads_writes(1,1))]
        pub fn end_round(origin: OriginFor<T>, round_id: u32) -> DispatchResultWithPostInfo
        /// Register a project in an ongoing round, so that it can be voted on
        #[pallet::weight(10_000 + T::DbWeight::get().reads_writes(1,1))]
        pub fn register_project(origin: OriginFor<T>, round_id: u32, hash: T::Hash, name: Vec<u8>) -> DispatchResultWithPostInfo {
            let who = ensure_signed(origin)?;

            ensure!(name.len() >= T::NameMinLength::get(), Error::<T>::ProjectNameTooShort);
            ensure!(name.len() <= T::NameMaxLength::get(), Error::<T>::ProjectNameTooLong);
            ensure!(!Projects::<T>::contains_key(&round_id, &hash), Error::<T>::DuplicateProject);

            let project = Project {
                total_votes: 0,
                grants: 0,
                support_area: 0,
                withdrew: 0,
                name: name,
                owner: who.clone(),
            };
            Projects::<T>::insert(round_id, hash, project);

            Self::deposit_event(Event::ProjectRegistered(hash, who));

            Ok(().into())
        }

        /// Vote for a project; this function will transfer the corresponding amount of tokens for your input ballot
        #[pallet::weight(10_000 + T::DbWeight::get().reads_writes(1,1))]
        pub fn vote(origin: OriginFor<T>, round_id: u32, hash: T::Hash, ballot: u128) -> DispatchResultWithPostInfo {
            let who = ensure_signed(origin)?;

            ensure!(Projects::<T>::contains_key(&round_id, &hash), Error::<T>::ProjectNotExist);
            ensure!(ballot > 0, Error::<T>::InvalidBallot);

            // check whether this round is still ongoing
            ensure!(Rounds::<T>::contains_key(&round_id), Error::<T>::RoundNotExist);
            let round = Rounds::<T>::get(round_id);
            ensure!(true == round.ongoing, Error::<T>::RoundHasEnded);

            // need to calculate hash of project hash and round_id combination here to avoid conflicts of projects in different rounds
            let vote_hash = T::Hashing::hash_of(&(&hash, &round_id));
            let voted = ProjectVotes::<T>::get(vote_hash, &who);
            let cost = Self::cal_cost(voted, ballot);
            let amount = Self::cal_amount(cost, false);
            let fee = Self::cal_amount(cost, true);

            // transfer first, update last, as transfer will ensure the free balance is enough
            let _ = T::Currency::transfer(&who, &Self::account_id(), Self::u128_to_balance(amount), KeepAlive);

            // update the project and corresponding round
            ProjectVotes::<T>::insert(vote_hash, &who, ballot+voted);
            Projects::<T>::mutate(round_id, hash, |poj| {
                let support_area = ballot.checked_mul(poj.total_votes - voted).unwrap();
                poj.support_area = support_area.checked_add(poj.support_area).unwrap();
                poj.total_votes += ballot;
                poj.grants += amount - fee;
                //debug::info!("Total votes: {:?}, Current votes: {:?}, Support Area: {:?},Est cost: {:?}",
                //    poj.total_votes, voted, support_area, cost);

                Rounds::<T>::mutate(round_id, |rnd| {
                    let tsa = rnd.total_support_area;
                    let tt = rnd.total_tax;
                    rnd.total_support_area = support_area.checked_add(tsa).unwrap();
                    rnd.total_tax = fee.checked_add(tt).unwrap();
                });
            });

            Self::deposit_event(Event::VoteSucceed(hash, who, ballot));

            Ok(().into())
        }
    }
}

impl<T: Config> Pallet<T> {
    // Add public immutables and private mutables.

    /// refer to https://github.com/paritytech/substrate/blob/743accbe3256de2fc615adcaa3ab03ebdbbb4dbd/frame/treasury/src/lib.rs#L351
    ///
    /// This actually does computation. If you need to keep using it, then make sure you cache the
    /// value and only call this once.
    pub fn account_id() -> T::AccountId {
        T::PalletId::get().into_account()
    }

    pub fn cal_cost(voted: u128, ballot: u128) -> u128 {
        let mut points = ballot.checked_mul(ballot.checked_add(1).unwrap()).unwrap() / 2;
        points = points.checked_add(ballot.checked_mul(voted).unwrap()).unwrap();
        return points;
    }

    pub fn cal_amount(amount: u128, is_fee: bool) -> u128 {
        let uov = T::UnitOfVote::get();
        let nup = T::NumberOfUnitPerVote::get();
        let frpv = T::FeeRatioPerVote::get();
        if is_fee {
            uov.checked_mul(frpv).unwrap().checked_mul(amount).unwrap()
        } else {
            uov.checked_mul(nup).unwrap().checked_mul(amount).unwrap()
        }
    }

    pub fn u128_to_balance(cost: u128) -> BalanceOf<T> {
        TryInto::<BalanceOf::<T>>::try_into(cost).ok().unwrap()
    }

    pub fn balance_to_u128(balance: BalanceOf<T>) -> u128 {
        TryInto::<u128>::try_into(balance).ok().unwrap()
    }

    // TODO: There is a bug in serde_json, we cannot use u128 https://github.com/paritytech/substrate/issues/4641
    pub fn vote_cost(who: T::AccountId, round_id:u32, hash: T::Hash, ballot: u32) -> u32 {
        // need to calculate hash of project hash and round_id combination here to avoid conflicts of projects in different rounds
        let vote_hash = T::Hashing::hash_of(&(&hash, &round_id));
        let voted = ProjectVotes::<T>::get(vote_hash, &who);
        TryInto::<u32>::try_into(Self::cal_cost(voted, ballot.into())).ok().unwrap()
    }

    // TODO: using a struct is a little complicated, use a tuple instead
    // (project_id, total_votes, grants, support_grants)
    pub fn projects_per_round(round_id:u32) -> Vec<(T::Hash, u32, u32, u32)> {
        let mut projects = vec![];
        let round = Rounds::<T>::get(round_id);
        let area = round.total_support_area;
        let pool = round.support_pool;

        for (hash, project) in Projects::<T>::iter_prefix(round_id) {
            let mut sg = 0;
            if area > 0 {
                sg = project.support_area.checked_mul(pool/area).unwrap()
            }
            let total_votes = TryInto::<u32>::try_into(project.total_votes).ok().unwrap();
            let grants = TryInto::<u32>::try_into(project.grants.checked_div(T::UnitOfVote::get()).unwrap()).ok().unwrap();
            let support_grants = TryInto::<u32>::try_into(sg.checked_div(T::UnitOfVote::get()).unwrap()).ok().unwrap();
            projects.push((hash, total_votes, grants, support_grants))
        }
        projects
    }
}
{
            // Only admin can control the round
            T::AdminOrigin::ensure_origin(origin)?;

            ensure!(Rounds::<T>::contains_key(&round_id), Error::<T>::RoundNotExist);
            let mut round = Rounds::<T>::get(round_id);
            ensure!(true == round.ongoing, Error::<T>::RoundHasEnded);

            let area = round.total_support_area;
            let pool = round.support_pool;

            for (hash, mut project) in Projects::<T>::iter_prefix(round_id) {
                if area > 0 {
                    let total = project.grants;
                    project.grants = total.checked_add(
                        project.support_area.checked_mul(pool/area).unwrap()
                    ).unwrap();
                }
                //debug::info!("Hash: {:?}, Total votes: {:?}, Grants: {:?}", hash, project.total_votes, project.grants);
                // reckon the final grants
                let _ = T::Currency::transfer(
                    &Self::account_id(),
                    &project.owner,
                    Self::u128_to_balance(project.grants),
                    KeepAlive
                );
            }
            round.ongoing = false;
            Rounds::<T>::insert(round_id, round);

            Self::deposit_event(Event::RoundEnded(round_id));

            Ok(().into())
        }
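The arithmetic in cal_cost above is what makes the voting quadratic: the k-th vote on a project costs k units, so buying `ballot` more votes after already holding `voted` costs the sum from voted+1 to voted+ballot, computed in closed form as ballot*(ballot+1)/2 + ballot*voted. A standalone Rust check of that closed form against the naive sum:

fn cal_cost(voted: u128, ballot: u128) -> u128 {
    // closed form of sum_{k = voted+1}^{voted+ballot} k
    ballot * (ballot + 1) / 2 + ballot * voted
}

fn main() {
    for voted in 0..50u128 {
        for ballot in 1..50u128 {
            let naive: u128 = (voted + 1..=voted + ballot).sum();
            assert_eq!(cal_cost(voted, ballot), naive);
        }
    }
    // n votes bought from scratch cost n*(n+1)/2 — quadratic in n
    assert_eq!(cal_cost(0, 10), 55);
}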
mail_utils.py
import smtplib
from email import encoders
from email.header import Header
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import parseaddr, formataddr


class SmtpKeys(object):
    SMTP_HOST = 'smtp_host'
    SMTP_PORT = 'smtp_port'
    SMTP_SENDER = 'smtp_sender'
    SMTP_RECEIVER = 'smtp_receiver'
    SMTP_SUBJECT = 'smtp_subject'
    SMTP_USERNAME = 'smtp_username'
    SMTP_PASSWORD = 'smtp_password'
    SMTP_ATTACHMENTS = 'smtp_attachments'
    SMTP_IS_THIRD_PARTY = 'smtp_third_party'
    SMTP_IS_SEND_HTML = 'smtp_send_html'
    SMTP_IS_SSL = 'smtp_ssl'
    SMTP_IS_VERBOSE_LOG = 'smtp_verbose_log'


class SmtpHelper(object):
    """
    This class simplifies common operations on the Python email module
    and provides a chaining invocation style for configuration.

    Usage:

    a = Attachment('E:\\test.jpg', 'test.jpg', 'image', 'jpg')
    helper = SmtpHelper() \
        .with_server('smtp.xx.com', 25) \
        .with_third_party_service() \
        .with_ssl() \
        .with_server_login('[email protected]', 'xxxxxxxxxxx') \
        .with_sender('SylvanasSun', '[email protected]') \
        .with_receiver('Claude Shannon', ['[email protected]']) \
        .with_subject('Hello') \
        .with_attachment(a)
    helper.send('Hello, World')

    Author: SylvanasSun <[email protected]>
    Licence: MIT
    """

    def __init__(self):
        self.params = {}

    def with_server(self, host, port=25):
        if not isinstance(host, str) or not isinstance(port, int):
            raise ValueError('The type of host and port must be a string and integer')
        self.params[SmtpKeys.SMTP_HOST] = host
        self.params[SmtpKeys.SMTP_PORT] = port
        return self

    def with_server_login(self, username, password):
        if not isinstance(username, str) or not isinstance(password, str):
            raise ValueError('The type of username and password must be a string')
        self.params[SmtpKeys.SMTP_USERNAME] = username
        self.params[SmtpKeys.SMTP_PASSWORD] = password
        return self

    def with_sender(self, sender, addr):
        if not isinstance(sender, str) or not isinstance(addr, str):
            raise ValueError('The type of sender and address must be a string')
        self.params[SmtpKeys.SMTP_SENDER] = [sender, addr]
        return self

    def with_receiver(self, receiver, addr):
        if not isinstance(receiver, str):
            raise ValueError('The type of receiver must be a string')
        if not isinstance(addr, list):
            raise ValueError('The type of receiver address must be a list')
        self.params[SmtpKeys.SMTP_RECEIVER] = [receiver, addr]
        return self

    def with_subject(self, subject):
        if not isinstance(subject, str):
            raise ValueError('The type of subject must be a string')
        self.params[SmtpKeys.SMTP_SUBJECT] = subject
        return self

    def with_third_party_service(self):
        self.params[SmtpKeys.SMTP_IS_THIRD_PARTY] = True
        return self

    def with_send_html(self):
        self.params[SmtpKeys.SMTP_IS_SEND_HTML] = True
        return self

    def with_ssl(self):
        self.params[SmtpKeys.SMTP_IS_SSL] = True
        return self

    def with_verbose_log(self):
        self.params[SmtpKeys.SMTP_IS_VERBOSE_LOG] = True
        return self

    def with_attachment(self, attachments):
        if not isinstance(attachments, Attachment) and not isinstance(attachments, list):
            raise ValueError('The type of attachments must be a class Attachment or list')
        elif isinstance(attachments, list):
            for a in attachments:
                if not isinstance(a, Attachment):
                    raise ValueError('The type of attachments must be a class Attachment')
        self.params[SmtpKeys.SMTP_ATTACHMENTS] = attachments
        return self

    def send(self, message_body, encode_format='utf-8'):
        self._validate_necessary_metadata([SmtpKeys.SMTP_HOST, SmtpKeys.SMTP_PORT,
                                           SmtpKeys.SMTP_SUBJECT, SmtpKeys.SMTP_SENDER,
                                           SmtpKeys.SMTP_RECEIVER])
        message, 
from_addr, to_addr = self._build_message(message_body, encode_format)
        smtp_server = self._build_smtp_server()
        try:
            smtp_server.sendmail(from_addr, to_addr, message.as_string())
        finally:
            smtp_server.quit()

    def _build_message(self, message_body, encode_format):
        msg = MIMEMultipart()
        sender = self.params[SmtpKeys.SMTP_SENDER]
        msg['From'] = self._format_addr('%s <%s>' % (sender[0], sender[1]), encode_format)
        receiver = self.params[SmtpKeys.SMTP_RECEIVER]
        if len(receiver[1]) == 1:
            msg['To'] = self._format_addr('%s <%s>' % (receiver[0], receiver[1]), encode_format)
        else:
            msg['To'] = self._format_addr(receiver[0], encode_format)
        msg['Subject'] = Header(self.params[SmtpKeys.SMTP_SUBJECT], encode_format).encode()
        if self._validated(SmtpKeys.SMTP_IS_SEND_HTML):
            msg.attach(MIMEText(message_body, 'html', encode_format))
        else:
            msg.attach(MIMEText(message_body, 'plain', encode_format))
        if SmtpKeys.SMTP_ATTACHMENTS in self.params:
            attachments = self.params[SmtpKeys.SMTP_ATTACHMENTS]
            if isinstance(attachments, list):
                for a in attachments:
                    msg.attach(a.to_MIME())
            else:
                msg.attach(attachments.to_MIME())
        return msg, sender[1], receiver[1]

    def _build_smtp_server(self):
        if self._validated(SmtpKeys.SMTP_IS_SSL):
            smtp = smtplib.SMTP_SSL(self.params[SmtpKeys.SMTP_HOST], self.params[SmtpKeys.SMTP_PORT])
        else:
            smtp = smtplib.SMTP(self.params[SmtpKeys.SMTP_HOST], self.params[SmtpKeys.SMTP_PORT])
        if self._validated(SmtpKeys.SMTP_IS_THIRD_PARTY):
            if SmtpKeys.SMTP_USERNAME in self.params and SmtpKeys.SMTP_PASSWORD in self.params:
                smtp.login(self.params[SmtpKeys.SMTP_USERNAME], self.params[SmtpKeys.SMTP_PASSWORD])
        if self._validated(SmtpKeys.SMTP_IS_VERBOSE_LOG):
            smtp.set_debuglevel(1)
        return smtp

    def _format_addr(self, s, encode_format='utf-8'):
        name, addr = parseaddr(s)
        return formataddr((Header(name, encode_format).encode(), addr))

    def _validated(self, key):
        return key in self.params and self.params[key]

    def _validate_necessary_metadata(self, keys):
        for key in keys:
            if key not in self.params:
                raise ValueError('The value %s must exist' % key)


class Attachment(object):
    """
    This class represents the necessary metadata of one attachment
    and provides a function for converting it to a MIMEBase object.
    """

    def
(self, path, filename, maintype, subtype, id=None): self.path = path self.filename = filename self.maintype = maintype self.subtype = subtype self.id = id def to_MIME(self): mime = MIMEBase(self.maintype, self.subtype, filename=self.filename) mime.add_header('Content-Disposition', 'attachment', filename=self.filename) if self.id is not None: mime.add_header('Content-ID', '<%s>' % self.id) mime.add_header('X-Attachment-Id', str(self.id)) with open(self.path, 'rb') as f: mime.set_payload(f.read()) encoders.encode_base64(mime) return mime
__init__
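SmtpHelper's with_* chaining is a classic builder pattern. A hypothetical Rust analogue (all names here are invented for illustration), where each method consumes and returns self, and send validates required fields much like _validate_necessary_metadata does:

#[derive(Default)]
struct SmtpConfig {
    host: Option<(String, u16)>,
    sender: Option<(String, String)>,
    receivers: Vec<String>,
    subject: Option<String>,
    use_ssl: bool,
}

impl SmtpConfig {
    fn with_server(mut self, host: &str, port: u16) -> Self {
        self.host = Some((host.to_string(), port));
        self
    }
    fn with_sender(mut self, name: &str, addr: &str) -> Self {
        self.sender = Some((name.to_string(), addr.to_string()));
        self
    }
    fn with_receiver(mut self, addr: &str) -> Self {
        self.receivers.push(addr.to_string());
        self
    }
    fn with_subject(mut self, subject: &str) -> Self {
        self.subject = Some(subject.to_string());
        self
    }
    fn with_ssl(mut self) -> Self {
        self.use_ssl = true;
        self
    }
    fn send(&self, body: &str) -> Result<(), String> {
        // mirrors _validate_necessary_metadata: required fields must be present
        let (host, port) = self.host.as_ref().ok_or("server not configured")?;
        let (name, addr) = self.sender.as_ref().ok_or("sender not configured")?;
        let subject = self.subject.as_ref().ok_or("subject not configured")?;
        let scheme = if self.use_ssl { "smtps" } else { "smtp" };
        // a real implementation would open the connection and send here
        println!(
            "would send {subject:?} ({} bytes) from {name} <{addr}> to {} recipient(s) via {scheme}://{host}:{port}",
            body.len(),
            self.receivers.len()
        );
        Ok(())
    }
}

fn main() {
    SmtpConfig::default()
        .with_server("smtp.example.com", 25)
        .with_ssl()
        .with_sender("Sender", "sender@example.com")
        .with_receiver("rcpt@example.com")
        .with_subject("Hello")
        .send("Hello, World")
        .unwrap();
}

Consuming self makes an unfinished chain a type error at the call site, which is the compile-time analogue of the runtime metadata validation in the Python version.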
registry_interface.rs
extern crate crypto;
extern crate environment;
extern crate hyper;
extern crate rand;
extern crate reqwest;
extern crate serde_json;
extern crate trow;
extern crate trow_server;

mod common;

#[cfg(test)]
mod interface_tests {

    use environment::Environment;

    use common;
    use reqwest::StatusCode;
    use reqwest;
    use serde_json;
    use std::fs::{self, File};
    use std::io::Read;
    use std::process::Child;
    use std::process::Command;
    use std::thread;
    use std::time::Duration;
    use trow::types::{RepoCatalog, RepoName, TagList};
    use trow_server::manifest;

    const TROW_ADDRESS: &str = "https://trow.test:8443";

    /*
    header! { (DistributionApi, "Docker-Distribution-API-Version") => [String] }
    header! { (UploadUuid, "Docker-Upload-Uuid") => [String] }
    */
    const DIST_API_HEADER: &str = "Docker-Distribution-API-Version";
    const UPLOAD_HEADER: &str = "Docker-Upload-Uuid";

    struct TrowInstance {
        pid: Child,
    }

    /// Call out to cargo to start trow.
    /// Seriously considering moving to docker run. https://docs.docker.com/registry/spec/auth/token/
    fn start_trow() -> TrowInstance {
        let mut child = Command::new("cargo")
            //.current_dir("../../")
            .arg("run")
            .env_clear()
            .envs(Environment::inherit().compile())
            .spawn()
            .expect("failed to start");

        let mut timeout = 20;

        let mut buf = Vec::new();
        File::open("./certs/ca.crt")
            .unwrap()
            .read_to_end(&mut buf)
            .unwrap();
        let cert = reqwest::Certificate::from_pem(&buf).unwrap();
        // get a client builder
        let client = reqwest::Client::builder()
            .add_root_certificate(cert)
            .build()
            .unwrap();

        let mut response = client.get(TROW_ADDRESS).send();
        while timeout > 0 && (response.is_err() || (response.unwrap().status() != StatusCode::OK)) {
            thread::sleep(Duration::from_millis(100));
            response = client.get(TROW_ADDRESS).send();
            timeout -= 1;
        }
        if timeout == 0 {
            child.kill().unwrap();
            panic!("Failed to start Trow");
        }
        TrowInstance { pid: child }
    }

    impl Drop for TrowInstance {
        fn drop(&mut self) {
            //Y U NO HV STOP?
            self.pid.kill().unwrap();
        }
    }

    fn get_main(cl: &reqwest::Client) {
        let resp = cl.get(TROW_ADDRESS).send().unwrap();
        assert_eq!(resp.status(), StatusCode::OK);
        assert_eq!(
            resp.headers().get(DIST_API_HEADER).unwrap(),
            "registry/2.0"
        );

        //All v2 registries should respond with a 200 to this
        let resp = cl
            .get(&(TROW_ADDRESS.to_owned() + "/v2/"))
            .send()
            .unwrap();
        assert_eq!(resp.status(), StatusCode::OK);
        assert_eq!(
            resp.headers().get(DIST_API_HEADER).unwrap(),
            "registry/2.0"
        );
    }

    fn
(cl: &reqwest::Client) { let resp = cl .get(&(TROW_ADDRESS.to_owned() + "/v2/test/test/blobs/not-an-entry")) .send() .unwrap(); assert_eq!(resp.status(), StatusCode::NOT_FOUND); } fn unsupported(cl: &reqwest::Client) { //Delete currently unimplemented let resp = cl .delete(&(TROW_ADDRESS.to_owned() + "/v2/name/repo/manifests/ref")) .send() .unwrap(); assert_eq!(resp.status(), StatusCode::METHOD_NOT_ALLOWED); } fn get_manifest(cl: &reqwest::Client, name: &str, tag: &str) { //Might need accept headers here let mut resp = cl .get(&format!("{}/v2/{}/manifests/{}", TROW_ADDRESS, name, tag)) .send() .unwrap(); assert_eq!(resp.status(), StatusCode::OK); let mani: manifest::ManifestV2 = resp.json().unwrap(); assert_eq!(mani.schema_version, 2); } fn check_repo_catalog(cl: &reqwest::Client, rc: &RepoCatalog) { let mut resp = cl .get(&format!("{}/v2/_catalog", TROW_ADDRESS)) .send() .unwrap(); let rc_resp: RepoCatalog = serde_json::from_str(&resp.text().unwrap()).unwrap(); assert_eq!(rc, &rc_resp); } fn check_tag_list(cl: &reqwest::Client, tl: &TagList) { let mut resp = cl .get(&format!( "{}/v2/{}/tags/list", TROW_ADDRESS, tl.repo_name() )) .send() .unwrap(); let tl_resp: TagList = serde_json::from_str(&resp.text().unwrap()).unwrap(); assert_eq!(tl, &tl_resp); } #[test] fn test_runner() { //Need to start with empty repo fs::remove_dir_all("./data").unwrap_or(()); //Had issues with stopping and starting trow causing test fails. //It might be possible to improve things with a thread_local let _trow = start_trow(); let mut buf = Vec::new(); File::open("./certs/ca.crt") .unwrap() .read_to_end(&mut buf) .unwrap(); let cert = reqwest::Certificate::from_pem(&buf).unwrap(); // get a client builder let client = reqwest::Client::builder() .add_root_certificate(cert) .build() .unwrap(); println!("Running get_main()"); get_main(&client); println!("Running get_blob()"); get_non_existent_blob(&client); println!("Running unsupported()"); unsupported(&client); println!("Running upload_layer(repo/image/test:tag)"); common::upload_layer(&client, "repo/image/test", "tag"); println!("Running upload_layer(image/test:latest)"); common::upload_layer(&client, "image/test", "latest"); println!("Running upload_layer(onename:tag)"); common::upload_layer(&client, "onename", "tag"); println!("Running upload_layer(onename:latest)"); common::upload_layer(&client, "onename", "latest"); println!("Running get_manifest(onename:tag)"); get_manifest(&client, "onename", "tag"); println!("Running get_manifest(image/test:latest)"); get_manifest(&client, "image/test", "latest"); println!("Running get_manifest(repo/image/test:tag)"); get_manifest(&client, "repo/image/test", "tag"); let mut rc = RepoCatalog::new(); rc.insert(RepoName("repo/image/test".to_string())); rc.insert(RepoName("image/test".to_string())); rc.insert(RepoName("onename".to_string())); check_repo_catalog(&client, &rc); let mut tl = TagList::new(RepoName("repo/image/test".to_string())); tl.insert("tag".to_string()); check_tag_list(&client, &tl); let mut tl2 = TagList::new(RepoName("onename".to_string())); tl2.insert("tag".to_string()); tl2.insert("latest".to_string()); check_tag_list(&client, &tl2); } }
get_non_existent_blob
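start_trow above polls the registry endpoint until it answers 200 or a retry budget is exhausted, then kills the child process on failure. The same wait-for-ready pattern isolated as a sketch, with a hypothetical is_up closure standing in for the real HTTPS request:

use std::thread;
use std::time::Duration;

fn wait_until_ready(mut is_up: impl FnMut() -> bool, mut retries: u32) -> bool {
    while retries > 0 {
        if is_up() {
            return true;
        }
        thread::sleep(Duration::from_millis(100));
        retries -= 1;
    }
    false // caller should kill the child process and fail the test
}

fn main() {
    let mut calls = 0;
    // toy probe: the "server" becomes ready on the third poll
    let ready = wait_until_ready(
        || {
            calls += 1;
            calls >= 3
        },
        20,
    );
    assert!(ready);
}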
manifests.go
// Copyright 2018 The Cluster Monitoring Operator Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package manifests import ( "bytes" // #nosec "crypto/sha1" "encoding/base64" "encoding/json" "fmt" "hash/fnv" "io" "net" "net/url" "strconv" "strings" monv1 "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1" configv1 "github.com/openshift/api/config/v1" routev1 "github.com/openshift/api/route/v1" securityv1 "github.com/openshift/api/security/v1" "github.com/openshift/cluster-monitoring-operator/pkg/promqlgen" "github.com/pkg/errors" admissionv1 "k8s.io/api/admissionregistration/v1" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" "k8s.io/api/extensions/v1beta1" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/yaml" apiregistrationv1beta1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1" ) const ( configManagedNamespace = "openshift-config-managed" sharedConfigMap = "monitoring-shared-config" ) var ( AlertmanagerConfig = "assets/alertmanager/secret.yaml" AlertmanagerService = "assets/alertmanager/service.yaml" AlertmanagerProxySecret = "assets/alertmanager/proxy-secret.yaml" AlertmanagerMain = "assets/alertmanager/alertmanager.yaml" AlertmanagerServiceAccount = "assets/alertmanager/service-account.yaml" AlertmanagerClusterRoleBinding = "assets/alertmanager/cluster-role-binding.yaml" AlertmanagerClusterRole = "assets/alertmanager/cluster-role.yaml" AlertmanagerRBACProxySecret = "assets/alertmanager/kube-rbac-proxy-secret.yaml" AlertmanagerRoute = "assets/alertmanager/route.yaml" AlertmanagerServiceMonitor = "assets/alertmanager/service-monitor.yaml" AlertmanagerTrustedCABundle = "assets/alertmanager/trusted-ca-bundle.yaml" KubeStateMetricsClusterRoleBinding = "assets/kube-state-metrics/cluster-role-binding.yaml" KubeStateMetricsClusterRole = "assets/kube-state-metrics/cluster-role.yaml" KubeStateMetricsDeployment = "assets/kube-state-metrics/deployment.yaml" KubeStateMetricsServiceAccount = "assets/kube-state-metrics/service-account.yaml" KubeStateMetricsService = "assets/kube-state-metrics/service.yaml" KubeStateMetricsServiceMonitor = "assets/kube-state-metrics/service-monitor.yaml" OpenShiftStateMetricsClusterRoleBinding = "assets/openshift-state-metrics/cluster-role-binding.yaml" OpenShiftStateMetricsClusterRole = "assets/openshift-state-metrics/cluster-role.yaml" OpenShiftStateMetricsDeployment = "assets/openshift-state-metrics/deployment.yaml" OpenShiftStateMetricsServiceAccount = "assets/openshift-state-metrics/service-account.yaml" OpenShiftStateMetricsService = "assets/openshift-state-metrics/service.yaml" OpenShiftStateMetricsServiceMonitor = "assets/openshift-state-metrics/service-monitor.yaml" NodeExporterDaemonSet = "assets/node-exporter/daemonset.yaml" NodeExporterService = "assets/node-exporter/service.yaml" NodeExporterServiceAccount = "assets/node-exporter/service-account.yaml" NodeExporterClusterRole = "assets/node-exporter/cluster-role.yaml" 
	NodeExporterClusterRoleBinding = "assets/node-exporter/cluster-role-binding.yaml"
	NodeExporterSecurityContextConstraints = "assets/node-exporter/security-context-constraints.yaml"
	NodeExporterServiceMonitor = "assets/node-exporter/service-monitor.yaml"

	PrometheusK8sClusterRoleBinding = "assets/prometheus-k8s/cluster-role-binding.yaml"
	PrometheusK8sRoleBindingConfig = "assets/prometheus-k8s/role-binding-config.yaml"
	PrometheusK8sRoleBindingList = "assets/prometheus-k8s/role-binding-specific-namespaces.yaml"
	PrometheusK8sClusterRole = "assets/prometheus-k8s/cluster-role.yaml"
	PrometheusK8sRoleConfig = "assets/prometheus-k8s/role-config.yaml"
	PrometheusK8sRoleList = "assets/prometheus-k8s/role-specific-namespaces.yaml"
	PrometheusK8sRules = "assets/prometheus-k8s/rules.yaml"
	PrometheusK8sServiceAccount = "assets/prometheus-k8s/service-account.yaml"
	PrometheusK8s = "assets/prometheus-k8s/prometheus.yaml"
	PrometheusK8sKubeletServiceMonitor = "assets/prometheus-k8s/service-monitor-kubelet.yaml"
	PrometheusK8sPrometheusServiceMonitor = "assets/prometheus-k8s/service-monitor.yaml"
	PrometheusK8sService = "assets/prometheus-k8s/service.yaml"
	PrometheusK8sProxySecret = "assets/prometheus-k8s/proxy-secret.yaml"
	PrometheusRBACProxySecret = "assets/prometheus-k8s/kube-rbac-proxy-secret.yaml"
	PrometheusK8sRoute = "assets/prometheus-k8s/route.yaml"
	PrometheusK8sHtpasswd = "assets/prometheus-k8s/htpasswd-secret.yaml"
	PrometheusK8sEtcdServiceMonitor = "assets/prometheus-k8s/service-monitor-etcd.yaml"
	PrometheusK8sServingCertsCABundle = "assets/prometheus-k8s/serving-certs-ca-bundle.yaml"
	PrometheusK8sKubeletServingCABundle = "assets/prometheus-k8s/kubelet-serving-ca-bundle.yaml"
	PrometheusK8sGrpcTLSSecret = "assets/prometheus-k8s/grpc-tls-secret.yaml"
	PrometheusK8sTrustedCABundle = "assets/prometheus-k8s/trusted-ca-bundle.yaml"

	PrometheusUserWorkloadServingCertsCABundle = "assets/prometheus-user-workload/serving-certs-ca-bundle.yaml"
	PrometheusUserWorkloadServiceAccount = "assets/prometheus-user-workload/service-account.yaml"
	PrometheusUserWorkloadClusterRole = "assets/prometheus-user-workload/cluster-role.yaml"
	PrometheusUserWorkloadClusterRoleBinding = "assets/prometheus-user-workload/cluster-role-binding.yaml"
	PrometheusUserWorkloadRoleConfig = "assets/prometheus-user-workload/role-config.yaml"
	PrometheusUserWorkloadRoleList = "assets/prometheus-user-workload/role-specific-namespaces.yaml"
	PrometheusUserWorkloadRoleBindingList = "assets/prometheus-user-workload/role-binding-specific-namespaces.yaml"
	PrometheusUserWorkloadRoleBindingConfig = "assets/prometheus-user-workload/role-binding-config.yaml"
	PrometheusUserWorkloadService = "assets/prometheus-user-workload/service.yaml"
	PrometheusUserWorkload = "assets/prometheus-user-workload/prometheus.yaml"
	PrometheusUserWorkloadPrometheusServiceMonitor = "assets/prometheus-user-workload/service-monitor.yaml"
	PrometheusUserWorkloadGrpcTLSSecret = "assets/prometheus-user-workload/grpc-tls-secret.yaml"

	PrometheusAdapterAPIService = "assets/prometheus-adapter/api-service.yaml"
	PrometheusAdapterClusterRole = "assets/prometheus-adapter/cluster-role.yaml"
	PrometheusAdapterClusterRoleBinding = "assets/prometheus-adapter/cluster-role-binding.yaml"
	PrometheusAdapterClusterRoleBindingDelegator = "assets/prometheus-adapter/cluster-role-binding-delegator.yaml"
	PrometheusAdapterClusterRoleBindingView = "assets/prometheus-adapter/cluster-role-binding-view.yaml"
	PrometheusAdapterClusterRoleServerResources = "assets/prometheus-adapter/cluster-role-server-resources.yaml"
	PrometheusAdapterClusterRoleAggregatedMetricsReader = "assets/prometheus-adapter/cluster-role-aggregated-metrics-reader.yaml"
	PrometheusAdapterConfigMap = "assets/prometheus-adapter/config-map.yaml"
	PrometheusAdapterConfigMapPrometheus = "assets/prometheus-adapter/configmap-prometheus.yaml"
	PrometheusAdapterDeployment = "assets/prometheus-adapter/deployment.yaml"
	PrometheusAdapterRoleBindingAuthReader = "assets/prometheus-adapter/role-binding-auth-reader.yaml"
	PrometheusAdapterService = "assets/prometheus-adapter/service.yaml"
	PrometheusAdapterServiceAccount = "assets/prometheus-adapter/service-account.yaml"

	PrometheusOperatorClusterRoleBinding = "assets/prometheus-operator/cluster-role-binding.yaml"
	PrometheusOperatorClusterRole = "assets/prometheus-operator/cluster-role.yaml"
	PrometheusOperatorServiceAccount = "assets/prometheus-operator/service-account.yaml"
	PrometheusOperatorDeployment = "assets/prometheus-operator/deployment.yaml"
	PrometheusOperatorService = "assets/prometheus-operator/service.yaml"
	PrometheusOperatorServiceMonitor = "assets/prometheus-operator/service-monitor.yaml"
	PrometheusOperatorCertsCABundle = "assets/prometheus-operator/operator-certs-ca-bundle.yaml"
	PrometheusOperatorRuleValidatingWebhook = "assets/prometheus-operator/prometheus-rule-validating-webhook.yaml"

	PrometheusOperatorUserWorkloadServiceAccount = "assets/prometheus-operator-user-workload/service-account.yaml"
	PrometheusOperatorUserWorkloadClusterRole = "assets/prometheus-operator-user-workload/cluster-role.yaml"
	PrometheusOperatorUserWorkloadClusterRoleBinding = "assets/prometheus-operator-user-workload/cluster-role-binding.yaml"
	PrometheusOperatorUserWorkloadService = "assets/prometheus-operator-user-workload/service.yaml"
	PrometheusOperatorUserWorkloadDeployment = "assets/prometheus-operator-user-workload/deployment.yaml"
	PrometheusOperatorUserWorkloadServiceMonitor = "assets/prometheus-operator-user-workload/service-monitor.yaml"

	GrafanaClusterRoleBinding = "assets/grafana/cluster-role-binding.yaml"
	GrafanaClusterRole = "assets/grafana/cluster-role.yaml"
	GrafanaConfigSecret = "assets/grafana/config.yaml"
	GrafanaDatasourcesSecret = "assets/grafana/dashboard-datasources.yaml"
	GrafanaDashboardDefinitions = "assets/grafana/dashboard-definitions.yaml"
	GrafanaDashboardSources = "assets/grafana/dashboard-sources.yaml"
	GrafanaDeployment = "assets/grafana/deployment.yaml"
	GrafanaProxySecret = "assets/grafana/proxy-secret.yaml"
	GrafanaRoute = "assets/grafana/route.yaml"
	GrafanaServiceAccount = "assets/grafana/service-account.yaml"
	GrafanaService = "assets/grafana/service.yaml"
	GrafanaServiceMonitor = "assets/grafana/service-monitor.yaml"
	GrafanaTrustedCABundle = "assets/grafana/trusted-ca-bundle.yaml"

	ClusterMonitoringOperatorService = "assets/cluster-monitoring-operator/service.yaml"
	ClusterMonitoringOperatorServiceMonitor = "assets/cluster-monitoring-operator/service-monitor.yaml"
	ClusterMonitoringClusterRole = "assets/cluster-monitoring-operator/cluster-role.yaml"
	ClusterMonitoringRulesEditClusterRole = "assets/cluster-monitoring-operator/monitoring-rules-edit-cluster-role.yaml"
	ClusterMonitoringRulesViewClusterRole = "assets/cluster-monitoring-operator/monitoring-rules-view-cluster-role.yaml"
	ClusterMonitoringEditClusterRole = "assets/cluster-monitoring-operator/monitoring-edit-cluster-role.yaml"
	ClusterMonitoringGrpcTLSSecret = "assets/cluster-monitoring-operator/grpc-tls-secret.yaml"

	TelemeterClientClusterRole = "assets/telemeter-client/cluster-role.yaml"
	TelemeterClientClusterRoleBinding = "assets/telemeter-client/cluster-role-binding.yaml"
	TelemeterClientClusterRoleBindingView = "assets/telemeter-client/cluster-role-binding-view.yaml"
	TelemeterClientDeployment = "assets/telemeter-client/deployment.yaml"
	TelemeterClientSecret = "assets/telemeter-client/secret.yaml"
	TelemeterClientService = "assets/telemeter-client/service.yaml"
	TelemeterClientServiceAccount = "assets/telemeter-client/service-account.yaml"
	TelemeterClientServiceMonitor = "assets/telemeter-client/service-monitor.yaml"
	TelemeterClientServingCertsCABundle = "assets/telemeter-client/serving-certs-c-a-bundle.yaml"

	ThanosQuerierDeployment = "assets/thanos-querier/deployment.yaml"
	ThanosQuerierService = "assets/thanos-querier/service.yaml"
	ThanosQuerierServiceMonitor = "assets/thanos-querier/service-monitor.yaml"
	ThanosQuerierPrometheusRule = "assets/thanos-querier/prometheus-rule.yaml"
	ThanosQuerierRoute = "assets/thanos-querier/route.yaml"
	ThanosQuerierOauthCookieSecret = "assets/thanos-querier/oauth-cookie-secret.yaml"
	ThanosQuerierHtpasswdSecret = "assets/thanos-querier/oauth-htpasswd-secret.yaml"
	ThanosQuerierRBACProxySecret = "assets/thanos-querier/kube-rbac-proxy-secret.yaml"
	ThanosQuerierServiceAccount = "assets/thanos-querier/service-account.yaml"
	ThanosQuerierClusterRole = "assets/thanos-querier/cluster-role.yaml"
	ThanosQuerierClusterRoleBinding = "assets/thanos-querier/cluster-role-binding.yaml"
	ThanosQuerierGrpcTLSSecret = "assets/thanos-querier/grpc-tls-secret.yaml"
	ThanosQuerierTrustedCABundle = "assets/thanos-querier/trusted-ca-bundle.yaml"

	ThanosRulerCustomResource = "assets/thanos-ruler/thanos-ruler.yaml"
	ThanosRulerService = "assets/thanos-ruler/service.yaml"
	ThanosRulerRoute = "assets/thanos-ruler/route.yaml"
	ThanosRulerOauthCookieSecret = "assets/thanos-ruler/oauth-cookie-secret.yaml"
	ThanosRulerHtpasswdSecret = "assets/thanos-ruler/oauth-htpasswd-secret.yaml"
	ThanosRulerQueryConfigSecret = "assets/thanos-ruler/query-config-secret.yaml"
	ThanosRulerAlertmanagerConfigSecret = "assets/thanos-ruler/alertmanagers-config-secret.yaml"
	ThanosRulerServiceAccount = "assets/thanos-ruler/service-account.yaml"
	ThanosRulerClusterRole = "assets/thanos-ruler/cluster-role.yaml"
	ThanosRulerClusterRoleBinding = "assets/thanos-ruler/cluster-role-binding.yaml"
	ThanosRulerMonitoringClusterRoleBinding = "assets/thanos-ruler/cluster-role-binding-monitoring.yaml"
	ThanosRulerGrpcTLSSecret = "assets/thanos-ruler/grpc-tls-secret.yaml"
	ThanosRulerTrustedCABundle = "assets/thanos-ruler/trusted-ca-bundle.yaml"
	ThanosRulerServiceMonitor = "assets/thanos-ruler/service-monitor.yaml"
	ThanosRulerPrometheusRule = "assets/thanos-ruler/thanos-ruler-prometheus-rule.yaml"

	TelemeterTrustedCABundle = "assets/telemeter-client/trusted-ca-bundle.yaml"
)

var (
	PrometheusConfigReloaderFlag = "--prometheus-config-reloader="
	ConfigReloaderImageFlag = "--config-reloader-image="
	PrometheusOperatorNamespaceFlag = "--namespaces="
	PrometheusOperatorDenyNamespaceFlag = "--deny-namespaces="
	PrometheusOperatorPrometheusInstanceNamespacesFlag = "--prometheus-instance-namespaces="
	PrometheusOperatorAlertmanagerInstanceNamespacesFlag = "--alertmanager-instance-namespaces="

	AuthProxyExternalURLFlag = "-external-url="
	AuthProxyCookieDomainFlag = "-cookie-domain="
	AuthProxyRedirectURLFlag = "-redirect-url="

	TrustedCABundleKey = "ca-bundle.crt"
)

const (
	IBMCloudPlatformType configv1.PlatformType = "IBMCloud"
)

func MustAssetReader(asset string) io.Reader {
	return bytes.NewReader(MustAsset(asset))
}
type Factory struct {
	namespace, namespaceUserWorkload string
	config *Config
}

func NewFactory(namespace, namespaceUserWorkload string, c *Config) *Factory {
	return &Factory{
		namespace: namespace,
		namespaceUserWorkload: namespaceUserWorkload,
		config: c,
	}
}
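// Example (illustrative, not part of the original file): a Factory is
// normally constructed once with the platform and user-workload monitoring
// namespaces plus the decoded operator configuration, e.g.
//
//	f := NewFactory("openshift-monitoring", "openshift-user-workload-monitoring", cfg)
//	route, err := f.PrometheusK8sRoute()
//
// The namespace literals above are assumptions based on OpenShift defaults;
// the real values are injected by the operator's entry point.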
func (f *Factory) PrometheusExternalURL(host string) *url.URL {
	if f.config.ClusterMonitoringConfiguration.PrometheusK8sConfig.Hostport != "" {
		host = f.config.ClusterMonitoringConfiguration.PrometheusK8sConfig.Hostport
	}

	return &url.URL{
		Scheme: "https",
		Host: host,
		Path: "/",
	}
}

func (f *Factory) AlertmanagerExternalURL(host string) *url.URL {
	if f.config.ClusterMonitoringConfiguration.AlertmanagerMainConfig.Hostport != "" {
		host = f.config.ClusterMonitoringConfiguration.AlertmanagerMainConfig.Hostport
	}

	return &url.URL{
		Scheme: "https",
		Host: host,
		Path: "/",
	}
}

func (f *Factory) AlertmanagerConfig() (*v1.Secret, error) {
	s, err := f.NewSecret(MustAssetReader(AlertmanagerConfig))
	if err != nil {
		return nil, err
	}

	s.Namespace = f.namespace
	return s, nil
}

func (f *Factory) AlertmanagerProxySecret() (*v1.Secret, error) {
	s, err := f.NewSecret(MustAssetReader(AlertmanagerProxySecret))
	if err != nil {
		return nil, err
	}

	p, err := GeneratePassword(43)
	if err != nil {
		return nil, err
	}
	s.Data["session_secret"] = []byte(p)
	s.Namespace = f.namespace
	return s, nil
}

func (f *Factory) AlertmanagerService() (*v1.Service, error) {
	s, err := f.NewService(MustAssetReader(AlertmanagerService))
	if err != nil {
		return nil, err
	}

	s.Namespace = f.namespace
	return s, nil
}

func (f *Factory) AlertmanagerServiceAccount() (*v1.ServiceAccount, error) {
	s, err := f.NewServiceAccount(MustAssetReader(AlertmanagerServiceAccount))
	if err != nil {
		return nil, err
	}

	s.Namespace = f.namespace
	return s, nil
}

func (f *Factory) AlertmanagerClusterRoleBinding() (*rbacv1.ClusterRoleBinding, error) {
	crb, err := f.NewClusterRoleBinding(MustAssetReader(AlertmanagerClusterRoleBinding))
	if err != nil {
		return nil, err
	}

	crb.Subjects[0].Namespace = f.namespace
	return crb, nil
}

func (f *Factory) AlertmanagerClusterRole() (*rbacv1.ClusterRole, error) {
	return f.NewClusterRole(MustAssetReader(AlertmanagerClusterRole))
}

func (f *Factory) AlertmanagerServiceMonitor() (*monv1.ServiceMonitor, error) {
	sm, err := f.NewServiceMonitor(MustAssetReader(AlertmanagerServiceMonitor))
	if err != nil {
		return nil, err
	}

	sm.Spec.Endpoints[0].TLSConfig.ServerName = fmt.Sprintf("alertmanager-main.%s.svc", f.namespace)
	sm.Namespace = f.namespace
	return sm, nil
}

func (f *Factory) AlertmanagerTrustedCABundle() (*v1.ConfigMap, error) {
	cm, err := f.NewConfigMap(MustAssetReader(AlertmanagerTrustedCABundle))
	if err != nil {
		return nil, err
	}
	return cm, nil
}

func (f *Factory) AlertmanagerMain(host string, trustedCABundleCM *v1.ConfigMap) (*monv1.Alertmanager, error) {
	a, err := f.NewAlertmanager(MustAssetReader(AlertmanagerMain))
	if err != nil {
		return nil, err
	}

	a.Spec.Image = &f.config.Images.Alertmanager
	a.Spec.ExternalURL = f.AlertmanagerExternalURL(host).String()

	if f.config.ClusterMonitoringConfiguration.AlertmanagerMainConfig.Resources != nil {
		a.Spec.Resources = *f.config.ClusterMonitoringConfiguration.AlertmanagerMainConfig.Resources
	}

	if f.config.ClusterMonitoringConfiguration.AlertmanagerMainConfig.VolumeClaimTemplate != nil {
		a.Spec.Storage = &monv1.StorageSpec{
			VolumeClaimTemplate: *f.config.ClusterMonitoringConfiguration.AlertmanagerMainConfig.VolumeClaimTemplate,
		}
	}

	if f.config.ClusterMonitoringConfiguration.AlertmanagerMainConfig.NodeSelector != nil {
		a.Spec.NodeSelector = f.config.ClusterMonitoringConfiguration.AlertmanagerMainConfig.NodeSelector
	}

	if len(f.config.ClusterMonitoringConfiguration.AlertmanagerMainConfig.Tolerations) > 0 {
		a.Spec.Tolerations = f.config.ClusterMonitoringConfiguration.AlertmanagerMainConfig.Tolerations
	}

	setEnv := func(container *v1.Container, name, value string) {
		for i := range container.Env {
			if container.Env[i].Name == name {
				container.Env[i].Value = value
				break
			}
		}
	}

	for i, c := range a.Spec.Containers {
		switch c.Name {
		case "alertmanager-proxy":
			a.Spec.Containers[i].Image = f.config.Images.OauthProxy

			if f.config.ClusterMonitoringConfiguration.HTTPConfig.HTTPProxy != "" {
				setEnv(&a.Spec.Containers[i], "HTTP_PROXY", f.config.ClusterMonitoringConfiguration.HTTPConfig.HTTPProxy)
			}
			if f.config.ClusterMonitoringConfiguration.HTTPConfig.HTTPSProxy != "" {
				setEnv(&a.Spec.Containers[i], "HTTPS_PROXY", f.config.ClusterMonitoringConfiguration.HTTPConfig.HTTPSProxy)
			}
			if f.config.ClusterMonitoringConfiguration.HTTPConfig.NoProxy != "" {
				setEnv(&a.Spec.Containers[i], "NO_PROXY", f.config.ClusterMonitoringConfiguration.HTTPConfig.NoProxy)
			}

			if trustedCABundleCM != nil {
				volumeName := "alertmanager-trusted-ca-bundle"
				a.Spec.VolumeMounts = append(a.Spec.VolumeMounts, trustedCABundleVolumeMount(volumeName))
				volume := trustedCABundleVolume(trustedCABundleCM.Name, volumeName)
				volume.VolumeSource.ConfigMap.Items = append(volume.VolumeSource.ConfigMap.Items, v1.KeyToPath{
					Key: TrustedCABundleKey,
					Path: "tls-ca-bundle.pem",
				})
				a.Spec.Volumes = append(a.Spec.Volumes, volume)
				a.Spec.Containers[i].VolumeMounts = append(
					a.Spec.Containers[i].VolumeMounts,
					trustedCABundleVolumeMount(volumeName),
				)
			}
		case "kube-rbac-proxy":
			a.Spec.Containers[i].Image = f.config.Images.KubeRbacProxy
		case "prom-label-proxy":
			a.Spec.Containers[i].Image = f.config.Images.PromLabelProxy
		}
	}
	a.Namespace = f.namespace

	return a, nil
}
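// Note (added commentary, not part of the original source): AlertmanagerMain
// shows the customization pattern used for every proxied component in this
// file: look containers up by name, swap in the release image, patch the
// HTTP(S)_PROXY/NO_PROXY environment variables in place, and, when a trusted
// CA bundle ConfigMap is supplied, mount it as tls-ca-bundle.pem. The env
// rewrite only mutates entries that already exist:
//
//	setEnv(&containers[i], "HTTP_PROXY", cfg.HTTPProxy) // rewrites, never appends
//
// Rewriting rather than appending keeps the container spec deterministic
// across reconciliations.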
func (f *Factory) AlertmanagerRBACProxySecret() (*v1.Secret, error) {
	s, err := f.NewSecret(MustAssetReader(AlertmanagerRBACProxySecret))
	if err != nil {
		return nil, err
	}
	s.Namespace = f.namespace
	return s, nil
}

func (f *Factory) AlertmanagerRoute() (*routev1.Route, error) {
	r, err := f.NewRoute(MustAssetReader(AlertmanagerRoute))
	if err != nil {
		return nil, err
	}

	if f.config.ClusterMonitoringConfiguration.AlertmanagerMainConfig.Hostport != "" {
		r.Spec.Host = f.config.ClusterMonitoringConfiguration.AlertmanagerMainConfig.Hostport
	}
	r.Namespace = f.namespace
	return r, nil
}

func (f *Factory) KubeStateMetricsClusterRoleBinding() (*rbacv1.ClusterRoleBinding, error) {
	crb, err := f.NewClusterRoleBinding(MustAssetReader(KubeStateMetricsClusterRoleBinding))
	if err != nil {
		return nil, err
	}

	crb.Subjects[0].Namespace = f.namespace
	return crb, nil
}

func (f *Factory) KubeStateMetricsClusterRole() (*rbacv1.ClusterRole, error) {
	return f.NewClusterRole(MustAssetReader(KubeStateMetricsClusterRole))
}

func (f *Factory) KubeStateMetricsServiceMonitor() (*monv1.ServiceMonitor, error) {
	sm, err := f.NewServiceMonitor(MustAssetReader(KubeStateMetricsServiceMonitor))
	if err != nil {
		return nil, err
	}

	sm.Spec.Endpoints[0].TLSConfig.ServerName = fmt.Sprintf("kube-state-metrics.%s.svc", f.namespace)
	sm.Spec.Endpoints[1].TLSConfig.ServerName = fmt.Sprintf("kube-state-metrics.%s.svc", f.namespace)
	sm.Namespace = f.namespace
	return sm, nil
}

func (f *Factory) KubeStateMetricsDeployment() (*appsv1.Deployment, error) {
	d, err := f.NewDeployment(MustAssetReader(KubeStateMetricsDeployment))
	if err != nil {
		return nil, err
	}

	for i, container := range d.Spec.Template.Spec.Containers {
		if container.Name == "kube-state-metrics" {
			d.Spec.Template.Spec.Containers[i].Image = f.config.Images.KubeStateMetrics
		}
		if container.Name == "kube-rbac-proxy-self" || container.Name == "kube-rbac-proxy-main" {
			d.Spec.Template.Spec.Containers[i].Image = f.config.Images.KubeRbacProxy
		}
	}

	if f.config.ClusterMonitoringConfiguration.KubeStateMetricsConfig.NodeSelector != nil {
		d.Spec.Template.Spec.NodeSelector = f.config.ClusterMonitoringConfiguration.KubeStateMetricsConfig.NodeSelector
	}
	if len(f.config.ClusterMonitoringConfiguration.KubeStateMetricsConfig.Tolerations) > 0 {
		d.Spec.Template.Spec.Tolerations = f.config.ClusterMonitoringConfiguration.KubeStateMetricsConfig.Tolerations
	}
	d.Namespace = f.namespace
	return d, nil
}

func (f *Factory) KubeStateMetricsServiceAccount() (*v1.ServiceAccount, error) {
	s, err := f.NewServiceAccount(MustAssetReader(KubeStateMetricsServiceAccount))
	if err != nil {
		return nil, err
	}

	s.Namespace = f.namespace
	return s, nil
}

func (f *Factory) KubeStateMetricsService() (*v1.Service, error) {
	s, err := f.NewService(MustAssetReader(KubeStateMetricsService))
	if err != nil {
		return nil, err
	}

	s.Namespace = f.namespace
	return s, nil
}

func (f *Factory) OpenShiftStateMetricsClusterRoleBinding() (*rbacv1.ClusterRoleBinding, error) {
	crb, err := f.NewClusterRoleBinding(MustAssetReader(OpenShiftStateMetricsClusterRoleBinding))
	if err != nil {
		return nil, err
	}

	crb.Subjects[0].Namespace = f.namespace
	return crb, nil
}

func (f *Factory) OpenShiftStateMetricsClusterRole() (*rbacv1.ClusterRole, error) {
	return f.NewClusterRole(MustAssetReader(OpenShiftStateMetricsClusterRole))
}

func (f *Factory) OpenShiftStateMetricsServiceMonitor() (*monv1.ServiceMonitor, error) {
	sm, err := f.NewServiceMonitor(MustAssetReader(OpenShiftStateMetricsServiceMonitor))
	if err != nil {
		return nil, err
	}

	sm.Spec.Endpoints[0].TLSConfig.ServerName = fmt.Sprintf("openshift-state-metrics.%s.svc", f.namespace)
	sm.Spec.Endpoints[1].TLSConfig.ServerName = fmt.Sprintf("openshift-state-metrics.%s.svc", f.namespace)
	sm.Namespace = f.namespace
	return sm, nil
}

func (f *Factory) OpenShiftStateMetricsDeployment() (*appsv1.Deployment, error) {
	d, err := f.NewDeployment(MustAssetReader(OpenShiftStateMetricsDeployment))
	if err != nil {
		return nil, err
	}

	d.Spec.Template.Spec.Containers[0].Image = f.config.Images.KubeRbacProxy
	d.Spec.Template.Spec.Containers[1].Image = f.config.Images.KubeRbacProxy
	d.Spec.Template.Spec.Containers[2].Image = f.config.Images.OpenShiftStateMetrics

	if f.config.ClusterMonitoringConfiguration.OpenShiftMetricsConfig.NodeSelector != nil {
		d.Spec.Template.Spec.NodeSelector = f.config.ClusterMonitoringConfiguration.OpenShiftMetricsConfig.NodeSelector
	}
	if len(f.config.ClusterMonitoringConfiguration.OpenShiftMetricsConfig.Tolerations) > 0 {
		d.Spec.Template.Spec.Tolerations = f.config.ClusterMonitoringConfiguration.OpenShiftMetricsConfig.Tolerations
	}
	d.Namespace = f.namespace
	return d, nil
}

func (f *Factory) OpenShiftStateMetricsServiceAccount() (*v1.ServiceAccount, error) {
	s, err := f.NewServiceAccount(MustAssetReader(OpenShiftStateMetricsServiceAccount))
	if err != nil {
		return nil, err
	}

	s.Namespace = f.namespace
	return s, nil
}

func (f *Factory) OpenShiftStateMetricsService() (*v1.Service, error) {
	s, err := f.NewService(MustAssetReader(OpenShiftStateMetricsService))
	if err != nil {
		return nil, err
	}

	s.Namespace = f.namespace
	return s, nil
}
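// Note (added commentary, not part of the original source):
// KubeStateMetricsDeployment above resolves containers by name, while
// OpenShiftStateMetricsDeployment addresses them by position (indices 0-2
// must match the container order in the OpenShiftStateMetricsDeployment
// asset). Both patterns appear in this file; the positional one is terser
// but breaks silently if the asset's container order ever changes.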
func (f *Factory) NodeExporterServiceMonitor() (*monv1.ServiceMonitor, error) {
	sm, err := f.NewServiceMonitor(MustAssetReader(NodeExporterServiceMonitor))
	if err != nil {
		return nil, err
	}

	sm.Spec.Endpoints[0].TLSConfig.ServerName = fmt.Sprintf("node-exporter.%s.svc", f.namespace)
	sm.Namespace = f.namespace
	return sm, nil
}

func (f *Factory) NodeExporterDaemonSet() (*appsv1.DaemonSet, error) {
	ds, err := f.NewDaemonSet(MustAssetReader(NodeExporterDaemonSet))
	if err != nil {
		return nil, err
	}

	ds.Spec.Template.Spec.InitContainers[0].Image = f.config.Images.NodeExporter
	ds.Spec.Template.Spec.Containers[0].Image = f.config.Images.NodeExporter
	ds.Spec.Template.Spec.Containers[1].Image = f.config.Images.KubeRbacProxy
	ds.Namespace = f.namespace
	return ds, nil
}

func (f *Factory) NodeExporterService() (*v1.Service, error) {
	s, err := f.NewService(MustAssetReader(NodeExporterService))
	if err != nil {
		return nil, err
	}

	s.Namespace = f.namespace
	return s, nil
}

func (f *Factory) NodeExporterSecurityContextConstraints() (*securityv1.SecurityContextConstraints, error) {
	scc, err := f.NewSecurityContextConstraints(MustAssetReader(NodeExporterSecurityContextConstraints))
	if err != nil {
		return nil, err
	}

	return scc, nil
}

func (f *Factory) NodeExporterServiceAccount() (*v1.ServiceAccount, error) {
	s, err := f.NewServiceAccount(MustAssetReader(NodeExporterServiceAccount))
	if err != nil {
		return nil, err
	}

	s.Namespace = f.namespace
	return s, nil
}

func (f *Factory) NodeExporterClusterRoleBinding() (*rbacv1.ClusterRoleBinding, error) {
	crb, err := f.NewClusterRoleBinding(MustAssetReader(NodeExporterClusterRoleBinding))
	if err != nil {
		return nil, err
	}

	crb.Subjects[0].Namespace = f.namespace
	return crb, nil
}

func (f *Factory) NodeExporterClusterRole() (*rbacv1.ClusterRole, error) {
	return f.NewClusterRole(MustAssetReader(NodeExporterClusterRole))
}

func (f *Factory) PrometheusK8sClusterRoleBinding() (*rbacv1.ClusterRoleBinding, error) {
	crb, err := f.NewClusterRoleBinding(MustAssetReader(PrometheusK8sClusterRoleBinding))
	if err != nil {
		return nil, err
	}

	crb.Subjects[0].Namespace = f.namespace
	return crb, nil
}

func (f *Factory) ThanosQuerierClusterRoleBinding() (*rbacv1.ClusterRoleBinding, error) {
	crb, err := f.NewClusterRoleBinding(MustAssetReader(ThanosQuerierClusterRoleBinding))
	if err != nil {
		return nil, err
	}

	crb.Subjects[0].Namespace = f.namespace
	return crb, nil
}

func (f *Factory) PrometheusUserWorkloadClusterRoleBinding() (*rbacv1.ClusterRoleBinding, error) {
	crb, err := f.NewClusterRoleBinding(MustAssetReader(PrometheusUserWorkloadClusterRoleBinding))
	if err != nil {
		return nil, err
	}

	crb.Subjects[0].Namespace = f.namespaceUserWorkload
	return crb, nil
}

func (f *Factory) PrometheusK8sClusterRole() (*rbacv1.ClusterRole, error) {
	return f.NewClusterRole(MustAssetReader(PrometheusK8sClusterRole))
}

func (f *Factory) ThanosQuerierClusterRole() (*rbacv1.ClusterRole, error) {
	return f.NewClusterRole(MustAssetReader(ThanosQuerierClusterRole))
}

func (f *Factory) PrometheusUserWorkloadClusterRole() (*rbacv1.ClusterRole, error) {
	return f.NewClusterRole(MustAssetReader(PrometheusUserWorkloadClusterRole))
}

func (f *Factory) PrometheusK8sRoleConfig() (*rbacv1.Role, error) {
	r, err := f.NewRole(MustAssetReader(PrometheusK8sRoleConfig))
	if err != nil {
		return nil, err
	}

	r.Namespace = f.namespace
	return r, nil
}

func (f *Factory) PrometheusUserWorkloadRoleConfig() (*rbacv1.Role, error) {
	r, err := f.NewRole(MustAssetReader(PrometheusUserWorkloadRoleConfig))
	if err != nil {
		return nil, err
	}

	r.Namespace = f.namespaceUserWorkload
	return r, nil
}
func (f *Factory) PrometheusK8sRoleBindingList() (*rbacv1.RoleBindingList, error) {
	rbl, err := f.NewRoleBindingList(MustAssetReader(PrometheusK8sRoleBindingList))
	if err != nil {
		return nil, err
	}

	// Iterate by index so the rewrite mutates the list items themselves;
	// ranging by value would only update loop-variable copies.
	for i := range rbl.Items {
		rbl.Items[i].Subjects[0].Namespace = f.namespace
	}

	return rbl, nil
}

func (f *Factory) PrometheusUserWorkloadRoleBindingList() (*rbacv1.RoleBindingList, error) {
	rbl, err := f.NewRoleBindingList(MustAssetReader(PrometheusUserWorkloadRoleBindingList))
	if err != nil {
		return nil, err
	}

	for i := range rbl.Items {
		rbl.Items[i].Subjects[0].Namespace = f.namespaceUserWorkload
	}

	return rbl, nil
}

func (f *Factory) PrometheusK8sRoleBindingConfig() (*rbacv1.RoleBinding, error) {
	rb, err := f.NewRoleBinding(MustAssetReader(PrometheusK8sRoleBindingConfig))
	if err != nil {
		return nil, err
	}

	rb.Namespace = f.namespace
	return rb, nil
}

func (f *Factory) PrometheusUserWorkloadRoleBindingConfig() (*rbacv1.RoleBinding, error) {
	rb, err := f.NewRoleBinding(MustAssetReader(PrometheusUserWorkloadRoleBindingConfig))
	if err != nil {
		return nil, err
	}

	rb.Namespace = f.namespaceUserWorkload
	return rb, nil
}

func (f *Factory) PrometheusK8sRoleList() (*rbacv1.RoleList, error) {
	rl, err := f.NewRoleList(MustAssetReader(PrometheusK8sRoleList))
	if err != nil {
		return nil, err
	}

	// Index-based loop: assigning to a range-value copy would leave the
	// items in the list untouched.
	for i := range rl.Items {
		rl.Items[i].Namespace = f.namespace
	}

	return rl, nil
}

func (f *Factory) PrometheusUserWorkloadRoleList() (*rbacv1.RoleList, error) {
	rl, err := f.NewRoleList(MustAssetReader(PrometheusUserWorkloadRoleList))
	if err != nil {
		return nil, err
	}

	for i := range rl.Items {
		rl.Items[i].Namespace = f.namespaceUserWorkload
	}

	return rl, nil
}

func (f *Factory) PrometheusK8sRules() (*monv1.PrometheusRule, error) {
	r, err := f.NewPrometheusRule(MustAssetReader(PrometheusK8sRules))
	if err != nil {
		return nil, err
	}

	r.Namespace = f.namespace

	if !f.config.ClusterMonitoringConfiguration.EtcdConfig.IsEnabled() {
		groups := []monv1.RuleGroup{}
		for _, g := range r.Spec.Groups {
			if g.Name != "etcd" {
				groups = append(groups, g)
			}
		}
		r.Spec.Groups = groups
	}

	if f.config.Platform == IBMCloudPlatformType {
		groups := []monv1.RuleGroup{}
		for _, g := range r.Spec.Groups {
			switch g.Name {
			case "kubernetes-system-apiserver", "kubernetes-system-controller-manager", "kubernetes-system-scheduler":
				// skip
			default:
				groups = append(groups, g)
			}
		}
		r.Spec.Groups = groups
	}

	return r, nil
}
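// Note (added commentary, not part of the original source): PrometheusK8sRules
// prunes rule groups out of the single bundled rules.yaml instead of keeping
// per-platform assets. A minimal sketch of the same filter as an in-place
// variant, purely for illustration:
//
//	keep := groups[:0]
//	for _, g := range groups {
//		if g.Name != "etcd" {
//			keep = append(keep, g)
//		}
//	}
//
// The original allocates a fresh slice, which is equivalent and arguably
// clearer; the in-place form merely avoids the allocation.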
func (f *Factory) PrometheusK8sServiceAccount() (*v1.ServiceAccount, error) {
	s, err := f.NewServiceAccount(MustAssetReader(PrometheusK8sServiceAccount))
	if err != nil {
		return nil, err
	}

	s.Namespace = f.namespace
	return s, nil
}

func (f *Factory) ThanosQuerierServiceAccount() (*v1.ServiceAccount, error) {
	s, err := f.NewServiceAccount(MustAssetReader(ThanosQuerierServiceAccount))
	if err != nil {
		return nil, err
	}

	s.Namespace = f.namespace
	return s, nil
}

func (f *Factory) PrometheusUserWorkloadServiceAccount() (*v1.ServiceAccount, error) {
	s, err := f.NewServiceAccount(MustAssetReader(PrometheusUserWorkloadServiceAccount))
	if err != nil {
		return nil, err
	}

	s.Namespace = f.namespaceUserWorkload
	return s, nil
}

func (f *Factory) PrometheusK8sProxySecret() (*v1.Secret, error) {
	s, err := f.NewSecret(MustAssetReader(PrometheusK8sProxySecret))
	if err != nil {
		return nil, err
	}

	p, err := GeneratePassword(43)
	if err != nil {
		return nil, err
	}
	s.Data["session_secret"] = []byte(p)
	s.Namespace = f.namespace
	return s, nil
}

func (f *Factory) PrometheusK8sGrpcTLSSecret() (*v1.Secret, error) {
	s, err := f.NewSecret(MustAssetReader(PrometheusK8sGrpcTLSSecret))
	if err != nil {
		return nil, err
	}

	s.Namespace = f.namespace
	return s, nil
}

func (f *Factory) PrometheusUserWorkloadGrpcTLSSecret() (*v1.Secret, error) {
	s, err := f.NewSecret(MustAssetReader(PrometheusUserWorkloadGrpcTLSSecret))
	if err != nil {
		return nil, err
	}

	s.Namespace = f.namespaceUserWorkload
	return s, nil
}

func (f *Factory) ThanosQuerierGrpcTLSSecret() (*v1.Secret, error) {
	s, err := f.NewSecret(MustAssetReader(ThanosQuerierGrpcTLSSecret))
	if err != nil {
		return nil, err
	}

	s.Namespace = f.namespace
	return s, nil
}

func (f *Factory) ThanosQuerierOauthCookieSecret() (*v1.Secret, error) {
	s, err := f.NewSecret(MustAssetReader(ThanosQuerierOauthCookieSecret))
	if err != nil {
		return nil, err
	}

	p, err := GeneratePassword(43)
	if err != nil {
		return nil, err
	}
	s.Data["session_secret"] = []byte(p)
	s.Namespace = f.namespace
	return s, nil
}

func (f *Factory) PrometheusK8sHtpasswdSecret(password string) (*v1.Secret, error) {
	s, err := f.NewSecret(MustAssetReader(PrometheusK8sHtpasswd))
	if err != nil {
		return nil, err
	}

	f.generateHtpasswdSecret(s, password)
	return s, nil
}

func (f *Factory) ThanosQuerierHtpasswdSecret(password string) (*v1.Secret, error) {
	s, err := f.NewSecret(MustAssetReader(ThanosQuerierHtpasswdSecret))
	if err != nil {
		return nil, err
	}

	f.generateHtpasswdSecret(s, password)
	return s, nil
}

func (f *Factory) ThanosRulerHtpasswdSecret(password string) (*v1.Secret, error) {
	s, err := f.NewSecret(MustAssetReader(ThanosRulerHtpasswdSecret))
	if err != nil {
		return nil, err
	}

	f.generateHtpasswdSecret(s, password)
	s.Namespace = f.namespaceUserWorkload
	return s, nil
}

func (f *Factory) generateHtpasswdSecret(s *v1.Secret, password string) {
	// #nosec
	// TODO: Replace this with a safer algorithm
	h := sha1.New()
	h.Write([]byte(password))
	// The "auth" value is an htpasswd entry for the single user "internal"
	// using the {SHA} scheme: the base64-encoded SHA-1 digest of the password.
	s.Data["auth"] = []byte("internal:{SHA}" + base64.StdEncoding.EncodeToString(h.Sum(nil)))
	s.Namespace = f.namespace
}

func (f *Factory) ThanosRulerQueryConfigSecret() (*v1.Secret, error) {
	s, err := f.NewSecret(MustAssetReader(ThanosRulerQueryConfigSecret))
	if err != nil {
		return nil, err
	}

	s.Namespace = f.namespaceUserWorkload
	return s, nil
}

func (f *Factory) ThanosRulerAlertmanagerConfigSecret() (*v1.Secret, error) {
	s, err := f.NewSecret(MustAssetReader(ThanosRulerAlertmanagerConfigSecret))
	if err != nil {
		return nil, err
	}

	s.Namespace = f.namespaceUserWorkload
	return s, nil
}

func (f *Factory) PrometheusRBACProxySecret() (*v1.Secret, error) {
	s, err := f.NewSecret(MustAssetReader(PrometheusRBACProxySecret))
	if err != nil {
		return nil, err
	}

	s.Namespace = f.namespace
	return s, nil
}

func (f *Factory) ThanosQuerierRBACProxySecret() (*v1.Secret, error) {
	s, err := f.NewSecret(MustAssetReader(ThanosQuerierRBACProxySecret))
	if err != nil {
		return nil, err
	}

	s.Namespace = f.namespace
	return s, nil
}

func (f *Factory) PrometheusK8sServingCertsCABundle() (*v1.ConfigMap, error) {
	c, err := f.NewConfigMap(MustAssetReader(PrometheusK8sServingCertsCABundle))
	if err != nil {
		return nil, err
	}

	c.Namespace = f.namespace
	return c, nil
}

func (f *Factory) PrometheusUserWorkloadServingCertsCABundle() (*v1.ConfigMap, error) {
	c, err := f.NewConfigMap(MustAssetReader(PrometheusUserWorkloadServingCertsCABundle))
	if err != nil {
		return nil, err
	}

	c.Namespace = f.namespaceUserWorkload
	return c, nil
}

func (f *Factory) PrometheusK8sKubeletServingCABundle(data map[string]string) (*v1.ConfigMap, error) {
	c, err := f.NewConfigMap(MustAssetReader(PrometheusK8sKubeletServingCABundle))
	if err != nil {
		return nil, err
	}

	c.Namespace = f.namespace
	c.Data = data
	return c, nil
}
func (f *Factory) PrometheusOperatorCertsCABundle() (*v1.ConfigMap, error) {
	c, err := f.NewConfigMap(MustAssetReader(PrometheusOperatorCertsCABundle))
	if err != nil {
		return nil, err
	}

	c.Namespace = f.namespace
	return c, nil
}

func (f *Factory) PrometheusK8sEtcdServiceMonitor() (*monv1.ServiceMonitor, error) {
	s, err := f.NewServiceMonitor(MustAssetReader(PrometheusK8sEtcdServiceMonitor))
	if err != nil {
		return nil, err
	}

	s.Namespace = f.namespace
	return s, nil
}

func (f *Factory) PrometheusK8sEtcdSecret(tlsClient *v1.Secret, ca *v1.ConfigMap) (*v1.Secret, error) {
	data := make(map[string]string)

	for k, v := range tlsClient.Data {
		data[k] = string(v)
	}
	for k, v := range ca.Data {
		data[k] = v
	}

	r := newErrMapReader(data)
	var (
		clientCA = r.value(TrustedCABundleKey)
		clientCert = r.value("tls.crt")
		clientKey = r.value("tls.key")
	)
	if r.Error() != nil {
		return nil, errors.Wrap(r.err, "couldn't find etcd certificate data")
	}

	return &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: f.namespace,
			Name: "kube-etcd-client-certs",
		},
		StringData: map[string]string{
			"etcd-client-ca.crt": clientCA,
			"etcd-client.key": clientKey,
			"etcd-client.crt": clientCert,
		},
	}, nil
}

func (f *Factory) PrometheusK8sRoute() (*routev1.Route, error) {
	r, err := f.NewRoute(MustAssetReader(PrometheusK8sRoute))
	if err != nil {
		return nil, err
	}

	if f.config.ClusterMonitoringConfiguration.PrometheusK8sConfig.Hostport != "" {
		r.Spec.Host = f.config.ClusterMonitoringConfiguration.PrometheusK8sConfig.Hostport
	}
	r.Namespace = f.namespace
	return r, nil
}

func (f *Factory) ThanosQuerierRoute() (*routev1.Route, error) {
	r, err := f.NewRoute(MustAssetReader(ThanosQuerierRoute))
	if err != nil {
		return nil, err
	}

	// apply hostport configuration to thanos
	if f.config.ClusterMonitoringConfiguration.PrometheusK8sConfig.Hostport != "" {
		r.Spec.Host = f.config.ClusterMonitoringConfiguration.PrometheusK8sConfig.Hostport
	}
	r.Namespace = f.namespace
	return r, nil
}

func (f *Factory) SharingConfigDeprecated(promHost, amHost, grafanaHost, thanosHost *url.URL) *v1.ConfigMap {
	return &v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name: "sharing-config",
			Namespace: f.namespace,
		},
		Data: map[string]string{
			"grafanaURL": grafanaHost.String(),
			"prometheusURL": promHost.String(),
			"alertmanagerURL": amHost.String(),
			"thanosURL": thanosHost.String(),
		},
	}
}

func (f *Factory) SharingConfig(promHost, amHost, grafanaHost, thanosHost *url.URL) *v1.ConfigMap {
	return &v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name: sharedConfigMap,
			Namespace: configManagedNamespace,
		},
		Data: map[string]string{
			// Configmap keys need to include "public" to indicate that they are public values.
			// See https://bugzilla.redhat.com/show_bug.cgi?id=1807100.
			"grafanaPublicURL": grafanaHost.String(),
			"prometheusPublicURL": promHost.String(),
			"alertmanagerPublicURL": amHost.String(),
			"thanosPublicURL": thanosHost.String(),
		},
	}
}

func (f *Factory) PrometheusK8sTrustedCABundle() (*v1.ConfigMap, error) {
	cm, err := f.NewConfigMap(MustAssetReader(PrometheusK8sTrustedCABundle))
	if err != nil {
		return nil, err
	}

	return cm, nil
}
// These constants refer to indices of prometheus-k8s containers.
// They need to be in sync with jsonnet/prometheus.jsonnet
const (
	K8S_CONTAINER_OAUTH_PROXY = 0
	K8S_CONTAINER_KUBE_RBAC_PROXY = 1
	K8S_CONTAINER_PROM_LABEL_PROXY = 2
	K8S_CONTAINER_THANOS_SIDECAR = 3
	K8S_CONTAINER_PROMETHEUS = 4
)

func (f *Factory) PrometheusK8s(host string, grpcTLS *v1.Secret, trustedCABundleCM *v1.ConfigMap) (*monv1.Prometheus, error) {
	p, err := f.NewPrometheus(MustAssetReader(PrometheusK8s))
	if err != nil {
		return nil, err
	}

	if f.config.ClusterMonitoringConfiguration.PrometheusK8sConfig.LogLevel != "" {
		p.Spec.LogLevel = f.config.ClusterMonitoringConfiguration.PrometheusK8sConfig.LogLevel
	}
	if f.config.ClusterMonitoringConfiguration.PrometheusK8sConfig.Retention != "" {
		p.Spec.Retention = f.config.ClusterMonitoringConfiguration.PrometheusK8sConfig.Retention
	}
	p.Spec.Image = &f.config.Images.Prometheus
	p.Spec.ExternalURL = f.PrometheusExternalURL(host).String()

	if f.config.ClusterMonitoringConfiguration.PrometheusK8sConfig.Resources != nil {
		p.Spec.Resources = *f.config.ClusterMonitoringConfiguration.PrometheusK8sConfig.Resources
	}
	if f.config.ClusterMonitoringConfiguration.PrometheusK8sConfig.NodeSelector != nil {
		p.Spec.NodeSelector = f.config.ClusterMonitoringConfiguration.PrometheusK8sConfig.NodeSelector
	}
	if len(f.config.ClusterMonitoringConfiguration.PrometheusK8sConfig.Tolerations) > 0 {
		p.Spec.Tolerations = f.config.ClusterMonitoringConfiguration.PrometheusK8sConfig.Tolerations
	}
	if f.config.ClusterMonitoringConfiguration.PrometheusK8sConfig.ExternalLabels != nil {
		p.Spec.ExternalLabels = f.config.ClusterMonitoringConfiguration.PrometheusK8sConfig.ExternalLabels
	}
	if f.config.ClusterMonitoringConfiguration.PrometheusK8sConfig.VolumeClaimTemplate != nil {
		p.Spec.Storage = &monv1.StorageSpec{
			VolumeClaimTemplate: *f.config.ClusterMonitoringConfiguration.PrometheusK8sConfig.VolumeClaimTemplate,
		}
	}

	telemetryEnabled := f.config.ClusterMonitoringConfiguration.TelemeterClientConfig.IsEnabled()
	if telemetryEnabled && f.config.RemoteWrite {
		selectorRelabelConfig, err := promqlgen.LabelSelectorsToRelabelConfig(f.config.ClusterMonitoringConfiguration.PrometheusK8sConfig.TelemetryMatches)
		if err != nil {
			return nil, errors.Wrap(err, "generate label selector relabel config")
		}

		compositeToken, err := json.Marshal(map[string]string{
			"cluster_id": f.config.ClusterMonitoringConfiguration.TelemeterClientConfig.ClusterID,
			"authorization_token": f.config.ClusterMonitoringConfiguration.TelemeterClientConfig.Token,
		})
		if err != nil {
			return nil, err
		}

		spec := monv1.RemoteWriteSpec{
			URL: f.config.ClusterMonitoringConfiguration.TelemeterClientConfig.TelemeterServerURL,
			BearerToken: base64.StdEncoding.EncodeToString(compositeToken),
			QueueConfig: &monv1.QueueConfig{
				// Amount of samples to load from the WAL into the in-memory
				// buffer before waiting for samples to be sent successfully
				// and then continuing to read from the WAL.
				Capacity: 30000,
				// Should we accumulate 10000 samples before the batch send
				// deadline is reached, we will send this amount of samples
				// anyways.
				MaxSamplesPerSend: 10000,
				// Batch samples for 1m until we send them if we not reach the
				// 10000 MaxSamplesPerSend first.
				BatchSendDeadline: "1m",
				// Backoff is doubled on every backoff. We start with 1s
				// backoff and double until the MaxBackOff.
				MinBackoff: "1s",
				// 128s is the 8th backoff in a row, once we end up here, we
				// don't increase backoff time anymore. As we would at most
				// produce (concurrency/256) number of requests per second.
				MaxBackoff: "256s",
			},
			WriteRelabelConfigs: []monv1.RelabelConfig{
				*selectorRelabelConfig,
				{
					TargetLabel: "_id",
					Replacement: f.config.ClusterMonitoringConfiguration.TelemeterClientConfig.ClusterID,
				},
				// relabeling the `ALERTS` series to `alerts` allows us to make
				// a distinction between the series produced in-cluster and out
				// of cluster.
				{
					SourceLabels: []string{"__name__"},
					TargetLabel: "__name__",
					Regex: "ALERTS",
					Replacement: "alerts",
				},
			},
		}
		p.Spec.RemoteWrite = []monv1.RemoteWriteSpec{spec}
	}
	if !telemetryEnabled {
		p.Spec.RemoteWrite = nil
	}

	if len(f.config.ClusterMonitoringConfiguration.PrometheusK8sConfig.RemoteWrite) > 0 {
		p.Spec.RemoteWrite = append(p.Spec.RemoteWrite, f.config.ClusterMonitoringConfiguration.PrometheusK8sConfig.RemoteWrite...)
	}

	// Iterate by index so the proxy URL is set on the slice elements
	// themselves; ranging by value would only mutate copies.
	for i := range p.Spec.RemoteWrite {
		if f.config.ClusterMonitoringConfiguration.HTTPConfig.HTTPProxy != "" {
			p.Spec.RemoteWrite[i].ProxyURL = f.config.ClusterMonitoringConfiguration.HTTPConfig.HTTPProxy
		}
		if f.config.ClusterMonitoringConfiguration.HTTPConfig.HTTPSProxy != "" {
			p.Spec.RemoteWrite[i].ProxyURL = f.config.ClusterMonitoringConfiguration.HTTPConfig.HTTPSProxy
		}
	}

	if !f.config.ClusterMonitoringConfiguration.EtcdConfig.IsEnabled() {
		secrets := []string{}
		for _, s := range p.Spec.Secrets {
			if s != "kube-etcd-client-certs" {
				secrets = append(secrets, s)
			}
		}
		p.Spec.Secrets = secrets
	}

	if f.config.Images.Thanos != "" {
		p.Spec.Thanos.Image = &f.config.Images.Thanos
	}
	p.Spec.Containers[K8S_CONTAINER_OAUTH_PROXY].Image = f.config.Images.OauthProxy
	p.Spec.Containers[K8S_CONTAINER_KUBE_RBAC_PROXY].Image = f.config.Images.KubeRbacProxy
	p.Spec.Containers[K8S_CONTAINER_PROM_LABEL_PROXY].Image = f.config.Images.PromLabelProxy
	p.Spec.Alerting.Alertmanagers[0].Namespace = f.namespace
	p.Spec.Alerting.Alertmanagers[0].TLSConfig.ServerName = fmt.Sprintf("alertmanager-main.%s.svc", f.namespace)
	p.Namespace = f.namespace

	setEnv := func(name, value string) {
		for i := range p.Spec.Containers[K8S_CONTAINER_OAUTH_PROXY].Env {
			if p.Spec.Containers[K8S_CONTAINER_OAUTH_PROXY].Env[i].Name == name {
				p.Spec.Containers[K8S_CONTAINER_OAUTH_PROXY].Env[i].Value = value
				break
			}
		}
	}
	if f.config.ClusterMonitoringConfiguration.HTTPConfig.HTTPProxy != "" {
		setEnv("HTTP_PROXY", f.config.ClusterMonitoringConfiguration.HTTPConfig.HTTPProxy)
	}
	if f.config.ClusterMonitoringConfiguration.HTTPConfig.HTTPSProxy != "" {
		setEnv("HTTPS_PROXY", f.config.ClusterMonitoringConfiguration.HTTPConfig.HTTPSProxy)
	}
	if f.config.ClusterMonitoringConfiguration.HTTPConfig.NoProxy != "" {
		setEnv("NO_PROXY", f.config.ClusterMonitoringConfiguration.HTTPConfig.NoProxy)
	}

	p.Spec.Volumes = append(p.Spec.Volumes, v1.Volume{
		Name: "secret-grpc-tls",
		VolumeSource: v1.VolumeSource{
			Secret: &v1.SecretVolumeSource{
				SecretName: grpcTLS.GetName(),
			},
		},
	})

	if trustedCABundleCM != nil {
		volumeName := "prometheus-trusted-ca-bundle"
		volume := trustedCABundleVolume(trustedCABundleCM.Name, volumeName)
		volume.VolumeSource.ConfigMap.Items = append(volume.VolumeSource.ConfigMap.Items, v1.KeyToPath{
			Key: TrustedCABundleKey,
			Path: "tls-ca-bundle.pem",
		})
		p.Spec.Volumes = append(p.Spec.Volumes, volume)

		// we only need the trusted CA bundle in:
		// 1. Prometheus, because users might want to configure external remote write.
		// 2. In OAuth proxy, as that communicates externally when executing the OAuth handshake.
		p.Spec.Containers[K8S_CONTAINER_OAUTH_PROXY].VolumeMounts = append(
			p.Spec.Containers[K8S_CONTAINER_OAUTH_PROXY].VolumeMounts,
			trustedCABundleVolumeMount(volumeName),
		)
		p.Spec.Containers[K8S_CONTAINER_PROMETHEUS].VolumeMounts = append(
			p.Spec.Containers[K8S_CONTAINER_PROMETHEUS].VolumeMounts,
			trustedCABundleVolumeMount(volumeName),
		)
	}

	return p, nil
}
func (f *Factory) PrometheusUserWorkload(grpcTLS *v1.Secret) (*monv1.Prometheus, error) {
	p, err := f.NewPrometheus(MustAssetReader(PrometheusUserWorkload))
	if err != nil {
		return nil, err
	}

	if f.config.UserWorkloadConfiguration.Prometheus.LogLevel != "" {
		p.Spec.LogLevel = f.config.UserWorkloadConfiguration.Prometheus.LogLevel
	}
	if f.config.UserWorkloadConfiguration.Prometheus.Retention != "" {
		p.Spec.Retention = f.config.UserWorkloadConfiguration.Prometheus.Retention
	}
	p.Spec.Image = &f.config.Images.Prometheus

	if f.config.UserWorkloadConfiguration.Prometheus.Resources != nil {
		p.Spec.Resources = *f.config.UserWorkloadConfiguration.Prometheus.Resources
	}
	if f.config.UserWorkloadConfiguration.Prometheus.NodeSelector != nil {
		p.Spec.NodeSelector = f.config.UserWorkloadConfiguration.Prometheus.NodeSelector
	}
	if len(f.config.UserWorkloadConfiguration.Prometheus.Tolerations) > 0 {
		p.Spec.Tolerations = f.config.UserWorkloadConfiguration.Prometheus.Tolerations
	}
	if f.config.UserWorkloadConfiguration.Prometheus.ExternalLabels != nil {
		p.Spec.ExternalLabels = f.config.UserWorkloadConfiguration.Prometheus.ExternalLabels
	}
	if f.config.UserWorkloadConfiguration.Prometheus.VolumeClaimTemplate != nil {
		p.Spec.Storage = &monv1.StorageSpec{
			VolumeClaimTemplate: *f.config.UserWorkloadConfiguration.Prometheus.VolumeClaimTemplate,
		}
	}
	if len(f.config.UserWorkloadConfiguration.Prometheus.RemoteWrite) > 0 {
		p.Spec.RemoteWrite = f.config.UserWorkloadConfiguration.Prometheus.RemoteWrite
	}

	// TODO: remove after 4.7
	if f.config.ClusterMonitoringConfiguration.PrometheusUserWorkloadConfig.LogLevel != "" {
		p.Spec.LogLevel = f.config.ClusterMonitoringConfiguration.PrometheusUserWorkloadConfig.LogLevel
	}
	if f.config.ClusterMonitoringConfiguration.PrometheusUserWorkloadConfig.Retention != "" {
		p.Spec.Retention = f.config.ClusterMonitoringConfiguration.PrometheusUserWorkloadConfig.Retention
	}
	if f.config.ClusterMonitoringConfiguration.PrometheusUserWorkloadConfig.Resources != nil {
		p.Spec.Resources = *f.config.ClusterMonitoringConfiguration.PrometheusUserWorkloadConfig.Resources
	}
	if f.config.ClusterMonitoringConfiguration.PrometheusUserWorkloadConfig.NodeSelector != nil {
		p.Spec.NodeSelector = f.config.ClusterMonitoringConfiguration.PrometheusUserWorkloadConfig.NodeSelector
	}
	if len(f.config.ClusterMonitoringConfiguration.PrometheusUserWorkloadConfig.Tolerations) > 0 {
		p.Spec.Tolerations = f.config.ClusterMonitoringConfiguration.PrometheusUserWorkloadConfig.Tolerations
	}
	if f.config.ClusterMonitoringConfiguration.PrometheusUserWorkloadConfig.ExternalLabels != nil {
		p.Spec.ExternalLabels = f.config.ClusterMonitoringConfiguration.PrometheusUserWorkloadConfig.ExternalLabels
	}
	if f.config.ClusterMonitoringConfiguration.PrometheusUserWorkloadConfig.VolumeClaimTemplate != nil {
		p.Spec.Storage = &monv1.StorageSpec{
			VolumeClaimTemplate: *f.config.ClusterMonitoringConfiguration.PrometheusUserWorkloadConfig.VolumeClaimTemplate,
		}
	}
	if len(f.config.ClusterMonitoringConfiguration.PrometheusUserWorkloadConfig.RemoteWrite) > 0 {
		p.Spec.RemoteWrite = f.config.ClusterMonitoringConfiguration.PrometheusUserWorkloadConfig.RemoteWrite
	}
	// end removal
	if f.config.Images.Thanos != "" {
		p.Spec.Thanos.Image = &f.config.Images.Thanos
	}
	p.Spec.Containers[0].Image = f.config.Images.KubeRbacProxy
	p.Spec.Alerting.Alertmanagers[0].Namespace = f.namespace
	p.Spec.Alerting.Alertmanagers[0].TLSConfig.ServerName = fmt.Sprintf("alertmanager-main.%s.svc", f.namespace)
	p.Namespace = f.namespaceUserWorkload

	p.Spec.Volumes = append(p.Spec.Volumes, v1.Volume{
		Name: "secret-grpc-tls",
		VolumeSource: v1.VolumeSource{
			Secret: &v1.SecretVolumeSource{
				SecretName: grpcTLS.GetName(),
			},
		},
	})

	return p, nil
}

func (f *Factory) PrometheusK8sKubeletServiceMonitor() (*monv1.ServiceMonitor, error) {
	s, err := f.NewServiceMonitor(MustAssetReader(PrometheusK8sKubeletServiceMonitor))
	if err != nil {
		return nil, err
	}

	s.Namespace = f.namespace
	return s, nil
}

func (f *Factory) PrometheusK8sPrometheusServiceMonitor() (*monv1.ServiceMonitor, error) {
	sm, err := f.NewServiceMonitor(MustAssetReader(PrometheusK8sPrometheusServiceMonitor))
	if err != nil {
		return nil, err
	}

	sm.Spec.Endpoints[0].TLSConfig.ServerName = fmt.Sprintf("prometheus-k8s.%s.svc", f.namespace)
	sm.Namespace = f.namespace
	return sm, nil
}

func (f *Factory) PrometheusUserWorkloadPrometheusServiceMonitor() (*monv1.ServiceMonitor, error) {
	sm, err := f.NewServiceMonitor(MustAssetReader(PrometheusUserWorkloadPrometheusServiceMonitor))
	if err != nil {
		return nil, err
	}

	sm.Spec.Endpoints[0].TLSConfig.ServerName = fmt.Sprintf("prometheus-user-workload.%s.svc", f.namespaceUserWorkload)
	sm.Namespace = f.namespaceUserWorkload
	return sm, nil
}

func (f *Factory) PrometheusAdapterClusterRole() (*rbacv1.ClusterRole, error) {
	return f.NewClusterRole(MustAssetReader(PrometheusAdapterClusterRole))
}

func (f *Factory) PrometheusAdapterClusterRoleServerResources() (*rbacv1.ClusterRole, error) {
	return f.NewClusterRole(MustAssetReader(PrometheusAdapterClusterRoleServerResources))
}

func (f *Factory) PrometheusAdapterClusterRoleAggregatedMetricsReader() (*rbacv1.ClusterRole, error) {
	return f.NewClusterRole(MustAssetReader(PrometheusAdapterClusterRoleAggregatedMetricsReader))
}

func (f *Factory) PrometheusAdapterClusterRoleBinding() (*rbacv1.ClusterRoleBinding, error) {
	crb, err := f.NewClusterRoleBinding(MustAssetReader(PrometheusAdapterClusterRoleBinding))
	if err != nil {
		return nil, err
	}

	crb.Subjects[0].Namespace = f.namespace
	return crb, nil
}

func (f *Factory) PrometheusAdapterClusterRoleBindingDelegator() (*rbacv1.ClusterRoleBinding, error) {
	crb, err := f.NewClusterRoleBinding(MustAssetReader(PrometheusAdapterClusterRoleBindingDelegator))
	if err != nil {
		return nil, err
	}

	crb.Subjects[0].Namespace = f.namespace
	return crb, nil
}

func (f *Factory) PrometheusAdapterClusterRoleBindingView() (*rbacv1.ClusterRoleBinding, error) {
	crb, err := f.NewClusterRoleBinding(MustAssetReader(PrometheusAdapterClusterRoleBindingView))
	if err != nil {
		return nil, err
	}

	crb.Subjects[0].Namespace = f.namespace
	return crb, nil
}

func (f *Factory) PrometheusAdapterRoleBindingAuthReader() (*rbacv1.RoleBinding, error) {
	rb, err := f.NewRoleBinding(MustAssetReader(PrometheusAdapterRoleBindingAuthReader))
	if err != nil {
		return nil, err
	}

	rb.Subjects[0].Namespace = f.namespace
	return rb, nil
}

func (f *Factory) PrometheusAdapterServiceAccount() (*v1.ServiceAccount, error) {
	sa, err := f.NewServiceAccount(MustAssetReader(PrometheusAdapterServiceAccount))
	if err != nil {
		return nil, err
	}

	sa.Namespace = f.namespace
	return sa, nil
}
func (f *Factory) PrometheusAdapterConfigMap() (*v1.ConfigMap, error) {
	cm, err := f.NewConfigMap(MustAssetReader(PrometheusAdapterConfigMap))
	if err != nil {
		return nil, err
	}

	cm.Namespace = f.namespace
	return cm, nil
}

func (f *Factory) PrometheusAdapterConfigMapPrometheus() (*v1.ConfigMap, error) {
	cm, err := f.NewConfigMap(MustAssetReader(PrometheusAdapterConfigMapPrometheus))
	if err != nil {
		return nil, err
	}

	cm.Namespace = f.namespace
	return cm, nil
}

func (f *Factory) PrometheusAdapterDeployment(apiAuthSecretName string, requestheader map[string]string) (*appsv1.Deployment, error) {
	dep, err := f.NewDeployment(MustAssetReader(PrometheusAdapterDeployment))
	if err != nil {
		return nil, err
	}

	spec := dep.Spec.Template.Spec
	spec.Containers[0].Image = f.config.Images.K8sPrometheusAdapter

	if f.config.ClusterMonitoringConfiguration.K8sPrometheusAdapter != nil && len(f.config.ClusterMonitoringConfiguration.K8sPrometheusAdapter.NodeSelector) > 0 {
		spec.NodeSelector = f.config.ClusterMonitoringConfiguration.K8sPrometheusAdapter.NodeSelector
	}
	if f.config.ClusterMonitoringConfiguration.K8sPrometheusAdapter != nil && len(f.config.ClusterMonitoringConfiguration.K8sPrometheusAdapter.Tolerations) > 0 {
		spec.Tolerations = f.config.ClusterMonitoringConfiguration.K8sPrometheusAdapter.Tolerations
	}
	dep.Namespace = f.namespace

	r := newErrMapReader(requestheader)
	var (
		requestheaderAllowedNames = strings.Join(r.slice("requestheader-allowed-names"), ",")
		requestheaderExtraHeadersPrefix = strings.Join(r.slice("requestheader-extra-headers-prefix"), ",")
		requestheaderGroupHeaders = strings.Join(r.slice("requestheader-group-headers"), ",")
		requestheaderUsernameHeaders = strings.Join(r.slice("requestheader-username-headers"), ",")
	)
	if r.Error() != nil {
		return nil, errors.Wrap(r.err, "value not found in extension api server authentication configmap")
	}

	spec.Containers[0].Args = append(spec.Containers[0].Args,
		"--client-ca-file=/etc/tls/private/client-ca-file",
		"--requestheader-client-ca-file=/etc/tls/private/requestheader-client-ca-file",
		"--requestheader-allowed-names="+requestheaderAllowedNames,
		"--requestheader-extra-headers-prefix="+requestheaderExtraHeadersPrefix,
		"--requestheader-group-headers="+requestheaderGroupHeaders,
		"--requestheader-username-headers="+requestheaderUsernameHeaders,
		"--tls-cert-file=/etc/tls/private/tls.crt",
		"--tls-private-key-file=/etc/tls/private/tls.key",
	)

	spec.Containers[0].VolumeMounts = append(spec.Containers[0].VolumeMounts,
		v1.VolumeMount{
			Name: "tls",
			ReadOnly: true,
			MountPath: "/etc/tls/private",
		},
	)
	spec.Volumes = append(spec.Volumes,
		v1.Volume{
			Name: "tls",
			VolumeSource: v1.VolumeSource{
				Secret: &v1.SecretVolumeSource{
					SecretName: apiAuthSecretName,
				},
			},
		},
	)

	dep.Spec.Template.Spec = spec

	return dep, nil
}

func (f *Factory) PrometheusAdapterService() (*v1.Service, error) {
	s, err := f.NewService(MustAssetReader(PrometheusAdapterService))
	if err != nil {
		return nil, err
	}

	s.Namespace = f.namespace
	return s, nil
}

func (f *Factory) PrometheusAdapterSecret(tlsSecret *v1.Secret, apiAuthConfigmap *v1.ConfigMap) (*v1.Secret, error) {
	data := make(map[string]string)

	for k, v := range tlsSecret.Data {
		data[k] = string(v)
	}
	for k, v := range apiAuthConfigmap.Data {
		data[k] = v
	}

	r := newErrMapReader(data)
	var (
		clientCA = r.value("client-ca-file")
		requestheaderClientCA = r.value("requestheader-client-ca-file")
		tlsCA = r.value("tls.crt")
		tlsKey = r.value("tls.key")
	)
	if r.Error() != nil {
		return nil, errors.Wrap(r.err, "value not found in extension api server authentication configmap")
	}

	h := fnv.New64()
	h.Write([]byte(clientCA + requestheaderClientCA + tlsCA + tlsKey))
	hash := strconv.FormatUint(h.Sum64(), 32)

	return &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: f.namespace,
			Name: fmt.Sprintf("prometheus-adapter-%s", hash),
			Labels: map[string]string{
				"monitoring.openshift.io/name": "prometheus-adapter",
				"monitoring.openshift.io/hash": hash,
			},
		},
		Data: map[string][]byte{
			"client-ca-file": []byte(clientCA),
			"requestheader-client-ca-file": []byte(requestheaderClientCA),
			"tls.crt": []byte(tlsCA),
			"tls.key": []byte(tlsKey),
		},
	}, nil
}
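// Note (added commentary, not part of the original source): naming the
// adapter secret prometheus-adapter-<hash>, where the hash covers every
// certificate and key it contains, means any change in the inputs yields a
// new secret name. Since PrometheusAdapterDeployment mounts the secret by
// that name, a certificate rotation forces a fresh rollout instead of an
// in-place secret mutation that running pods would not notice.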
func (f *Factory) PrometheusAdapterAPIService() (*apiregistrationv1beta1.APIService, error) {
	return f.NewAPIService(MustAssetReader(PrometheusAdapterAPIService))
}

func (f *Factory) PrometheusOperatorServiceMonitor() (*monv1.ServiceMonitor, error) {
	sm, err := f.NewServiceMonitor(MustAssetReader(PrometheusOperatorServiceMonitor))
	if err != nil {
		return nil, err
	}

	sm.Namespace = f.namespace
	sm.Spec.Endpoints[0].TLSConfig.ServerName = fmt.Sprintf("prometheus-operator.%s.svc", f.namespace)
	return sm, nil
}

func (f *Factory) PrometheusOperatorUserWorkloadServiceMonitor() (*monv1.ServiceMonitor, error) {
	sm, err := f.NewServiceMonitor(MustAssetReader(PrometheusOperatorUserWorkloadServiceMonitor))
	if err != nil {
		return nil, err
	}

	sm.Namespace = f.namespaceUserWorkload
	sm.Spec.Endpoints[0].TLSConfig.ServerName = fmt.Sprintf("prometheus-operator.%s.svc", f.namespaceUserWorkload)
	return sm, nil
}

func (f *Factory) PrometheusOperatorClusterRoleBinding() (*rbacv1.ClusterRoleBinding, error) {
	crb, err := f.NewClusterRoleBinding(MustAssetReader(PrometheusOperatorClusterRoleBinding))
	if err != nil {
		return nil, err
	}

	crb.Subjects[0].Namespace = f.namespace
	return crb, nil
}

func (f *Factory) PrometheusOperatorUserWorkloadClusterRoleBinding() (*rbacv1.ClusterRoleBinding, error) {
	crb, err := f.NewClusterRoleBinding(MustAssetReader(PrometheusOperatorUserWorkloadClusterRoleBinding))
	if err != nil {
		return nil, err
	}

	crb.Subjects[0].Namespace = f.namespaceUserWorkload
	return crb, nil
}

func (f *Factory) PrometheusOperatorClusterRole() (*rbacv1.ClusterRole, error) {
	return f.NewClusterRole(MustAssetReader(PrometheusOperatorClusterRole))
}

func (f *Factory) PrometheusOperatorUserWorkloadClusterRole() (*rbacv1.ClusterRole, error) {
	return f.NewClusterRole(MustAssetReader(PrometheusOperatorUserWorkloadClusterRole))
}

func (f *Factory) PrometheusOperatorServiceAccount() (*v1.ServiceAccount, error) {
	s, err := f.NewServiceAccount(MustAssetReader(PrometheusOperatorServiceAccount))
	if err != nil {
		return nil, err
	}

	s.Namespace = f.namespace
	return s, nil
}

func (f *Factory) PrometheusOperatorUserWorkloadServiceAccount() (*v1.ServiceAccount, error) {
	s, err := f.NewServiceAccount(MustAssetReader(PrometheusOperatorUserWorkloadServiceAccount))
	if err != nil {
		return nil, err
	}

	s.Namespace = f.namespaceUserWorkload
	return s, nil
}

func (f *Factory) PrometheusOperatorDeployment(namespaces []string) (*appsv1.Deployment, error) {
	d, err := f.NewDeployment(MustAssetReader(PrometheusOperatorDeployment))
	if err != nil {
		return nil, err
	}

	if len(f.config.ClusterMonitoringConfiguration.PrometheusOperatorConfig.NodeSelector) > 0 {
		d.Spec.Template.Spec.NodeSelector = f.config.ClusterMonitoringConfiguration.PrometheusOperatorConfig.NodeSelector
	}
	if len(f.config.ClusterMonitoringConfiguration.PrometheusOperatorConfig.Tolerations) > 0 {
		d.Spec.Template.Spec.Tolerations = f.config.ClusterMonitoringConfiguration.PrometheusOperatorConfig.Tolerations
	}
	d.Spec.Template.Spec.Containers[0].Image = f.config.Images.PrometheusOperator
	d.Spec.Template.Spec.Containers[1].Image = f.config.Images.KubeRbacProxy

	args := d.Spec.Template.Spec.Containers[0].Args
	for i := range args {
		if strings.HasPrefix(args[i], PrometheusOperatorNamespaceFlag) && len(namespaces) > 0 {
			args[i] = PrometheusOperatorNamespaceFlag + strings.Join(namespaces, ",")
		}
		if strings.HasPrefix(args[i], PrometheusConfigReloaderFlag) && f.config.Images.PrometheusConfigReloader != "" {
			args[i] = PrometheusConfigReloaderFlag + f.config.Images.PrometheusConfigReloader
		}
		if strings.HasPrefix(args[i], ConfigReloaderImageFlag) && f.config.Images.ConfigmapReloader != "" {
			args[i] = ConfigReloaderImageFlag + f.config.Images.ConfigmapReloader
		}
		if strings.HasPrefix(args[i], PrometheusOperatorAlertmanagerInstanceNamespacesFlag) && f.namespace != "" {
			args[i] = PrometheusOperatorAlertmanagerInstanceNamespacesFlag + f.namespace
		}
		if strings.HasPrefix(args[i], PrometheusOperatorPrometheusInstanceNamespacesFlag) && f.namespace != "" {
			args[i] = PrometheusOperatorPrometheusInstanceNamespacesFlag + f.namespace
		}
	}
	d.Spec.Template.Spec.Containers[0].Args = args
	d.Namespace = f.namespace

	return d, nil
}
func (f *Factory) PrometheusOperatorUserWorkloadDeployment(denyNamespaces []string) (*appsv1.Deployment, error) {
	d, err := f.NewDeployment(MustAssetReader(PrometheusOperatorUserWorkloadDeployment))
	if err != nil {
		return nil, err
	}

	if len(f.config.UserWorkloadConfiguration.PrometheusOperator.NodeSelector) > 0 {
		d.Spec.Template.Spec.NodeSelector = f.config.UserWorkloadConfiguration.PrometheusOperator.NodeSelector
	}
	if len(f.config.UserWorkloadConfiguration.PrometheusOperator.Tolerations) > 0 {
		d.Spec.Template.Spec.Tolerations = f.config.UserWorkloadConfiguration.PrometheusOperator.Tolerations
	}

	// TODO: remove in 4.7
	if len(f.config.ClusterMonitoringConfiguration.PrometheusOperatorUserWorkloadConfig.NodeSelector) > 0 {
		d.Spec.Template.Spec.NodeSelector = f.config.ClusterMonitoringConfiguration.PrometheusOperatorUserWorkloadConfig.NodeSelector
	}
	if len(f.config.ClusterMonitoringConfiguration.PrometheusOperatorUserWorkloadConfig.Tolerations) > 0 {
		d.Spec.Template.Spec.Tolerations = f.config.ClusterMonitoringConfiguration.PrometheusOperatorUserWorkloadConfig.Tolerations
	}
	// end of remove

	d.Spec.Template.Spec.Containers[0].Image = f.config.Images.PrometheusOperator
	d.Spec.Template.Spec.Containers[1].Image = f.config.Images.KubeRbacProxy

	args := d.Spec.Template.Spec.Containers[0].Args
	for i := range args {
		if strings.HasPrefix(args[i], PrometheusOperatorDenyNamespaceFlag) {
			args[i] = PrometheusOperatorDenyNamespaceFlag + strings.Join(denyNamespaces, ",")
		}
		if strings.HasPrefix(args[i], PrometheusConfigReloaderFlag) {
			args[i] = PrometheusConfigReloaderFlag + f.config.Images.PrometheusConfigReloader
		}
		if strings.HasPrefix(args[i], ConfigReloaderImageFlag) {
			args[i] = ConfigReloaderImageFlag + f.config.Images.ConfigmapReloader
		}
		if strings.HasPrefix(args[i], PrometheusOperatorAlertmanagerInstanceNamespacesFlag) {
			args[i] = PrometheusOperatorAlertmanagerInstanceNamespacesFlag + f.namespaceUserWorkload
		}
		if strings.HasPrefix(args[i], PrometheusOperatorPrometheusInstanceNamespacesFlag) {
			args[i] = PrometheusOperatorPrometheusInstanceNamespacesFlag + f.namespaceUserWorkload
		}
	}
	d.Spec.Template.Spec.Containers[0].Args = args
	d.Namespace = f.namespaceUserWorkload

	return d, nil
}

func (f *Factory) PrometheusRuleValidatingWebhook() (*admissionv1.ValidatingWebhookConfiguration, error) {
	wc, err := f.NewValidatingWebhook(MustAssetReader(PrometheusOperatorRuleValidatingWebhook))
	if err != nil {
		return nil, err
	}

	return wc, nil
}
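// Note (added commentary, not part of the original source): both operator
// deployments configure the prometheus-operator binary by rewriting its CLI
// flags in place, matching each existing argument by its flag prefix:
//
//	if strings.HasPrefix(args[i], PrometheusOperatorNamespaceFlag) {
//		args[i] = PrometheusOperatorNamespaceFlag + strings.Join(namespaces, ",")
//	}
//
// Because only pre-existing flags are rewritten, a flag absent from the
// deployment asset is never injected; the asset remains the source of truth
// for which flags exist.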
func (f *Factory) PrometheusOperatorService() (*v1.Service, error) {
	s, err := f.NewService(MustAssetReader(PrometheusOperatorService))
	if err != nil {
		return nil, err
	}

	s.Namespace = f.namespace
	return s, nil
}

func (f *Factory) PrometheusOperatorUserWorkloadService() (*v1.Service, error) {
	s, err := f.NewService(MustAssetReader(PrometheusOperatorUserWorkloadService))
	if err != nil {
		return nil, err
	}

	s.Namespace = f.namespaceUserWorkload
	return s, nil
}

func (f *Factory) PrometheusK8sService() (*v1.Service, error) {
	s, err := f.NewService(MustAssetReader(PrometheusK8sService))
	if err != nil {
		return nil, err
	}

	s.Namespace = f.namespace
	return s, nil
}

func (f *Factory) PrometheusUserWorkloadService() (*v1.Service, error) {
	s, err := f.NewService(MustAssetReader(PrometheusUserWorkloadService))
	if err != nil {
		return nil, err
	}

	s.Namespace = f.namespaceUserWorkload
	return s, nil
}

func (f *Factory) GrafanaClusterRoleBinding() (*rbacv1.ClusterRoleBinding, error) {
	crb, err := f.NewClusterRoleBinding(MustAssetReader(GrafanaClusterRoleBinding))
	if err != nil {
		return nil, err
	}

	crb.Subjects[0].Namespace = f.namespace
	return crb, nil
}

func (f *Factory) GrafanaClusterRole() (*rbacv1.ClusterRole, error) {
	return f.NewClusterRole(MustAssetReader(GrafanaClusterRole))
}

func (f *Factory) GrafanaConfig() (*v1.Secret, error) {
	s, err := f.NewSecret(MustAssetReader(GrafanaConfigSecret))
	if err != nil {
		return nil, err
	}

	s.Namespace = f.namespace
	return s, nil
}

type GrafanaDatasources struct {
	ApiVersion int `json:"apiVersion"`
	Datasources []*GrafanaDatasource `json:"datasources"`
}

type GrafanaDatasource struct {
	Access string `json:"access"`
	BasicAuth bool `json:"basicAuth"`
	BasicAuthPassword string `json:"basicAuthPassword"`
	BasicAuthUser string `json:"basicAuthUser"`
	Editable bool `json:"editable"`
	JsonData *GrafanaJsonData `json:"jsonData"`
	Name string `json:"name"`
	OrgId int `json:"orgId"`
	Type string `json:"type"`
	Url string `json:"url"`
	Version int `json:"version"`
}

type GrafanaJsonData struct {
	TlsSkipVerify bool `json:"tlsSkipVerify"`
}

func (f *Factory) GrafanaDatasources() (*v1.Secret, error) {
	s, err := f.NewSecret(MustAssetReader(GrafanaDatasourcesSecret))
	if err != nil {
		return nil, err
	}

	d := &GrafanaDatasources{}
	err = json.Unmarshal(s.Data["datasources.yaml"], d)
	if err != nil {
		return nil, err
	}
	d.Datasources[0].BasicAuthPassword, err = GeneratePassword(255)
	if err != nil {
		return nil, err
	}

	b, err := json.MarshalIndent(d, "", " ")
	if err != nil {
		return nil, err
	}
	s.Data["prometheus.yaml"] = b

	s.Namespace = f.namespace
	return s, nil
}

func (f *Factory) GrafanaDashboardDefinitions() (*v1.ConfigMapList, error) {
	cl, err := f.NewConfigMapList(MustAssetReader(GrafanaDashboardDefinitions))
	if err != nil {
		return nil, err
	}

	configmaps := []v1.ConfigMap{}
	for _, c := range cl.Items {
		c.Namespace = f.namespace
		if !f.config.ClusterMonitoringConfiguration.EtcdConfig.IsEnabled() {
			if c.GetName() != "grafana-dashboard-etcd" {
				configmaps = append(configmaps, c)
			}
		} else {
			configmaps = append(configmaps, c)
		}
	}
	cl.Items = configmaps

	return cl, nil
}

func (f *Factory) GrafanaDashboardSources() (*v1.ConfigMap, error) {
	c, err := f.NewConfigMap(MustAssetReader(GrafanaDashboardSources))
	if err != nil {
		return nil, err
	}

	c.Namespace = f.namespace
	return c, nil
}

func (f *Factory) GrafanaTrustedCABundle() (*v1.ConfigMap, error) {
	cm, err := f.NewConfigMap(MustAssetReader(GrafanaTrustedCABundle))
	if err != nil {
		return nil, err
	}

	return cm, nil
}
// GrafanaDeployment generates a new Deployment for Grafana.
// If the passed ConfigMap is not empty it mounts the Trusted CA Bundle as a
// VolumeMount to the /etc/pki/ca-trust/extracted/pem/ location.
func (f *Factory) GrafanaDeployment(proxyCABundleCM *v1.ConfigMap) (*appsv1.Deployment, error) {
	d, err := f.NewDeployment(MustAssetReader(GrafanaDeployment))
	if err != nil {
		return nil, err
	}

	d.Spec.Template.Spec.Containers[0].Image = f.config.Images.Grafana

	if !f.config.ClusterMonitoringConfiguration.EtcdConfig.IsEnabled() {
		vols := []v1.Volume{}
		volMounts := []v1.VolumeMount{}
		for _, v := range d.Spec.Template.Spec.Volumes {
			if v.Name != "grafana-dashboard-etcd" {
				vols = append(vols, v)
			}
		}
		for _, vm := range d.Spec.Template.Spec.Containers[0].VolumeMounts {
			if vm.Name != "grafana-dashboard-etcd" {
				volMounts = append(volMounts, vm)
			}
		}

		d.Spec.Template.Spec.Volumes = vols
		d.Spec.Template.Spec.Containers[0].VolumeMounts = volMounts
	}

	d.Spec.Template.Spec.Containers[1].Image = f.config.Images.OauthProxy

	setEnv := func(name, value string) {
		for i := range d.Spec.Template.Spec.Containers[1].Env {
			if d.Spec.Template.Spec.Containers[1].Env[i].Name == name {
				d.Spec.Template.Spec.Containers[1].Env[i].Value = value
				break
			}
		}
	}
	if f.config.ClusterMonitoringConfiguration.HTTPConfig.HTTPProxy != "" {
		setEnv("HTTP_PROXY", f.config.ClusterMonitoringConfiguration.HTTPConfig.HTTPProxy)
	}
	if f.config.ClusterMonitoringConfiguration.HTTPConfig.HTTPSProxy != "" {
		setEnv("HTTPS_PROXY", f.config.ClusterMonitoringConfiguration.HTTPConfig.HTTPSProxy)
	}
	if f.config.ClusterMonitoringConfiguration.HTTPConfig.NoProxy != "" {
		setEnv("NO_PROXY", f.config.ClusterMonitoringConfiguration.HTTPConfig.NoProxy)
	}

	if f.config.ClusterMonitoringConfiguration.GrafanaConfig.NodeSelector != nil {
		d.Spec.Template.Spec.NodeSelector = f.config.ClusterMonitoringConfiguration.GrafanaConfig.NodeSelector
	}
	if len(f.config.ClusterMonitoringConfiguration.GrafanaConfig.Tolerations) > 0 {
		d.Spec.Template.Spec.Tolerations = f.config.ClusterMonitoringConfiguration.GrafanaConfig.Tolerations
	}

	if proxyCABundleCM != nil {
		volumeName := "grafana-trusted-ca-bundle"
		d.Spec.Template.Spec.Containers[1].VolumeMounts = append(d.Spec.Template.Spec.Containers[1].VolumeMounts, trustedCABundleVolumeMount(volumeName))
		volume := trustedCABundleVolume(proxyCABundleCM.Name, volumeName)
		volume.VolumeSource.ConfigMap.Items = append(volume.VolumeSource.ConfigMap.Items, v1.KeyToPath{
			Key: TrustedCABundleKey,
			Path: "tls-ca-bundle.pem",
		})
		d.Spec.Template.Spec.Volumes = append(d.Spec.Template.Spec.Volumes, volume)
	}

	d.Namespace = f.namespace
	return d, nil
}

func (f *Factory) GrafanaProxySecret() (*v1.Secret, error) {
	s, err := f.NewSecret(MustAssetReader(GrafanaProxySecret))
	if err != nil {
		return nil, err
	}

	p, err := GeneratePassword(43)
	if err != nil {
		return nil, err
	}
	s.Data["session_secret"] = []byte(p)
	s.Namespace = f.namespace
	return s, nil
}

func (f *Factory) GrafanaRoute() (*routev1.Route, error) {
	r, err := f.NewRoute(MustAssetReader(GrafanaRoute))
	if err != nil {
		return nil, err
	}

	if f.config.ClusterMonitoringConfiguration.GrafanaConfig.Hostport != "" {
		r.Spec.Host = f.config.ClusterMonitoringConfiguration.GrafanaConfig.Hostport
	}
	r.Namespace = f.namespace
	return r, nil
}

func (f *Factory) GrafanaServiceAccount() (*v1.ServiceAccount, error) {
	s, err := f.NewServiceAccount(MustAssetReader(GrafanaServiceAccount))
	if err != nil {
		return nil, err
	}

	s.Namespace = f.namespace
	return s, nil
}

func (f *Factory) GrafanaService() (*v1.Service, error) {
	s, err := f.NewService(MustAssetReader(GrafanaService))
	if err != nil {
		return nil, err
	}

	s.Namespace = f.namespace
	return s, nil
}
nil, err } s.Namespace = f.namespace return s, nil } func (f *Factory) GrafanaServiceMonitor() (*monv1.ServiceMonitor, error) { s, err := f.NewServiceMonitor(MustAssetReader(GrafanaServiceMonitor)) if err != nil { return nil, err } s.Spec.Endpoints[0].TLSConfig.ServerName = fmt.Sprintf("grafana.%s.svc", f.namespace) s.Namespace = f.namespace return s, nil } func (f *Factory) ClusterMonitoringClusterRole() (*rbacv1.ClusterRole, error) { cr, err := f.NewClusterRole(MustAssetReader(ClusterMonitoringClusterRole)) if err != nil { return nil, err } return cr, nil } func (f *Factory) ClusterMonitoringRulesEditClusterRole() (*rbacv1.ClusterRole, error) { cr, err := f.NewClusterRole(MustAssetReader(ClusterMonitoringRulesEditClusterRole)) if err != nil { return nil, err } return cr, nil } func (f *Factory) ClusterMonitoringRulesViewClusterRole() (*rbacv1.ClusterRole, error) { cr, err := f.NewClusterRole(MustAssetReader(ClusterMonitoringRulesViewClusterRole)) if err != nil { return nil, err } return cr, nil } func (f *Factory) ClusterMonitoringEditClusterRole() (*rbacv1.ClusterRole, error) { cr, err := f.NewClusterRole(MustAssetReader(ClusterMonitoringEditClusterRole)) if err != nil { return nil, err } return cr, nil } func (f *Factory) ClusterMonitoringOperatorService() (*v1.Service, error) { s, err := f.NewService(MustAssetReader(ClusterMonitoringOperatorService)) if err != nil { return nil, err } s.Namespace = f.namespace return s, nil } func (f *Factory) ClusterMonitoringOperatorServiceMonitor() (*monv1.ServiceMonitor, error) { sm, err := f.NewServiceMonitor(MustAssetReader(ClusterMonitoringOperatorServiceMonitor)) if err != nil { return nil, err } sm.Spec.Endpoints[0].TLSConfig.ServerName = fmt.Sprintf("cluster-monitoring-operator.%s.svc", f.namespace) sm.Namespace = f.namespace return sm, nil } func hostFromBaseAddress(baseAddress string) (string, error) { host, _, err := net.SplitHostPort(baseAddress) if err != nil && !IsMissingPortInAddressError(err) { return "", nil } if host == "" { return baseAddress, nil } return host, nil } func IsMissingPortInAddressError(err error) bool { switch e := err.(type) { case *net.AddrError: if e.Err == "missing port in address" { return true } } return false } func (f *Factory) NewDaemonSet(manifest io.Reader) (*appsv1.DaemonSet, error) { ds, err := NewDaemonSet(manifest) if err != nil { return nil, err } if ds.GetNamespace() == "" { ds.SetNamespace(f.namespace) } return ds, nil } func (f *Factory) NewService(manifest io.Reader) (*v1.Service, error) { s, err := NewService(manifest) if err != nil { return nil, err } if s.GetNamespace() == "" { s.SetNamespace(f.namespace) } return s, nil } func (f *Factory) NewEndpoints(manifest io.Reader) (*v1.Endpoints, error) { e, err := NewEndpoints(manifest) if err != nil { return nil, err } if e.GetNamespace() == "" { e.SetNamespace(f.namespace) } return e, nil } func (f *Factory) NewRoute(manifest io.Reader) (*routev1.Route, error) { r, err := NewRoute(manifest) if err != nil { return nil, err } if r.GetNamespace() == "" { r.SetNamespace(f.namespace) } return r, nil } func (f *Factory) NewSecret(manifest io.Reader) (*v1.Secret, error) { s, err := NewSecret(manifest) if err != nil { return nil, err } if s.GetNamespace() == "" { s.SetNamespace(f.namespace) } return s, nil } func (f *Factory) NewRoleBinding(manifest io.Reader) (*rbacv1.RoleBinding, error) { rb, err := NewRoleBinding(manifest) if err != nil { return nil, err } if rb.GetNamespace() == "" { rb.SetNamespace(f.namespace) } return rb, nil } func (f *Factory) 
NewRoleList(manifest io.Reader) (*rbacv1.RoleList, error) { rl, err := NewRoleList(manifest) if err != nil { return nil, err } for _, r := range rl.Items { if r.GetNamespace() == "" { r.SetNamespace(f.namespace) } } return rl, nil } func (f *Factory) NewRoleBindingList(manifest io.Reader) (*rbacv1.RoleBindingList, error) { rbl, err := NewRoleBindingList(manifest) if err != nil { return nil, err } for _, rb := range rbl.Items { if rb.GetNamespace() == "" { rb.SetNamespace(f.namespace) } } return rbl, nil } func (f *Factory) NewRole(manifest io.Reader) (*rbacv1.Role, error) { r, err := NewRole(manifest) if err != nil { return nil, err } if r.GetNamespace() == "" { r.SetNamespace(f.namespace) } return r, nil } func (f *Factory) NewConfigMap(manifest io.Reader) (*v1.ConfigMap, error) { cm, err := NewConfigMap(manifest) if err != nil { return nil, err } if cm.GetNamespace() == "" { cm.SetNamespace(f.namespace) } return cm, nil } func (f *Factory) NewConfigMapList(manifest io.Reader) (*v1.ConfigMapList, error) { cml, err := NewConfigMapList(manifest) if err != nil { return nil, err } for _, cm := range cml.Items { if cm.GetNamespace() == "" { cm.SetNamespace(f.namespace) } } return cml, nil } func (f *Factory) NewServiceAccount(manifest io.Reader) (*v1.ServiceAccount, error) { sa, err := NewServiceAccount(manifest) if err != nil { return nil, err } if sa.GetNamespace() == "" { sa.SetNamespace(f.namespace) } return sa, nil } func (f *Factory) NewPrometheus(manifest io.Reader) (*monv1.Prometheus, error) { p, err := NewPrometheus(manifest) if err != nil { return nil, err } if p.GetNamespace() == "" { p.SetNamespace(f.namespace) } return p, nil } func (f *Factory) NewPrometheusRule(manifest io.Reader) (*monv1.PrometheusRule, error) { p, err := NewPrometheusRule(manifest) if err != nil { return nil, err } if p.GetNamespace() == "" { p.SetNamespace(f.namespace) } return p, nil } func (f *Factory) NewTelemeterPrometheusRecRuleFromString(expr string) (*monv1.PrometheusRule, error) { p := &monv1.PrometheusRule{ ObjectMeta: metav1.ObjectMeta{ Name: "telemetry", }, Spec: monv1.PrometheusRuleSpec{ Groups: []monv1.RuleGroup{ { Name: "telemeter.rules", Rules: []monv1.Rule{ { Record: "cluster:telemetry_selected_series:count", Expr: intstr.FromString(expr), }, }, }, }, }, } if p.GetNamespace() == "" { p.SetNamespace(f.namespace) } return p, nil } func (f *Factory) NewAlertmanager(manifest io.Reader) (*monv1.Alertmanager, error) { a, err := NewAlertmanager(manifest) if err != nil { return nil, err } if a.GetNamespace() == "" { a.SetNamespace(f.namespace) } return a, nil } func (f *Factory) NewThanosRuler(manifest io.Reader) (*monv1.ThanosRuler, error) { t, err := NewThanosRuler(manifest) if err != nil { return nil, err } if t.GetNamespace() == "" { t.SetNamespace(f.namespaceUserWorkload) } return t, nil } func (f *Factory) NewServiceMonitor(manifest io.Reader) (*monv1.ServiceMonitor, error) { sm, err := NewServiceMonitor(manifest) if err != nil { return nil, err } if sm.GetNamespace() == "" { sm.SetNamespace(f.namespace) } return sm, nil } func (f *Factory) NewDeployment(manifest io.Reader) (*appsv1.Deployment, error) { d, err := NewDeployment(manifest) if err != nil { return nil, err } if d.GetNamespace() == "" { d.SetNamespace(f.namespace) } return d, nil } func (f *Factory) NewIngress(manifest io.Reader) (*v1beta1.Ingress, error) { i, err := NewIngress(manifest) if err != nil { return nil, err } if i.GetNamespace() == "" { i.SetNamespace(f.namespace) } return i, nil } func (f *Factory) NewAPIService(manifest 
io.Reader) (*apiregistrationv1beta1.APIService, error) { return NewAPIService(manifest) } func (f *Factory) NewSecurityContextConstraints(manifest io.Reader) (*securityv1.SecurityContextConstraints, error) { return NewSecurityContextConstraints(manifest) } func (f *Factory) NewClusterRoleBinding(manifest io.Reader) (*rbacv1.ClusterRoleBinding, error) { return NewClusterRoleBinding(manifest) } func (f *Factory) NewClusterRole(manifest io.Reader) (*rbacv1.ClusterRole, error) { return NewClusterRole(manifest) } func (f *Factory) NewValidatingWebhook(manifest io.Reader) (*admissionv1.ValidatingWebhookConfiguration, error) { return NewValidatingWebhook(manifest) } const ( // These constants refer to indices of prometheus-k8s containers. // They need to be in sync with jsonnet/prometheus.jsonnet THANOS_QUERIER_CONTAINER_THANOS = 0 THANOS_QUERIER_CONTAINER_OAUTH_PROXY = 1 THANOS_QUERIER_CONTAINER_KUBE_RBAC_PROXY = 2 THANOS_QUERIER_CONTAINER_PROM_LABEL_PROXY = 3 ) func (f *Factory) ThanosQuerierDeployment(grpcTLS *v1.Secret, enableUserWorkloadMonitoring bool, trustedCA *v1.ConfigMap) (*appsv1.Deployment, error) { d, err := f.NewDeployment(MustAssetReader(ThanosQuerierDeployment)) if err != nil { return nil, err } setEnv := func(name, value string) { for i := range d.Spec.Template.Spec.Containers[THANOS_QUERIER_CONTAINER_OAUTH_PROXY].Env { if d.Spec.Template.Spec.Containers[THANOS_QUERIER_CONTAINER_OAUTH_PROXY].Env[i].Name == name { d.Spec.Template.Spec.Containers[THANOS_QUERIER_CONTAINER_OAUTH_PROXY].Env[i].Value = value break } } } if f.config.ClusterMonitoringConfiguration.HTTPConfig.HTTPProxy != "" { setEnv("HTTP_PROXY", f.config.ClusterMonitoringConfiguration.HTTPConfig.HTTPProxy) } if f.config.ClusterMonitoringConfiguration.HTTPConfig.HTTPSProxy != "" { setEnv("HTTPS_PROXY", f.config.ClusterMonitoringConfiguration.HTTPConfig.HTTPSProxy) } if f.config.ClusterMonitoringConfiguration.HTTPConfig.NoProxy != "" { setEnv("NO_PROXY", f.config.ClusterMonitoringConfiguration.HTTPConfig.NoProxy) } d.Namespace = f.namespace d.Spec.Template.Spec.Containers[THANOS_QUERIER_CONTAINER_THANOS].Image = f.config.Images.Thanos d.Spec.Template.Spec.Containers[THANOS_QUERIER_CONTAINER_OAUTH_PROXY].Image = f.config.Images.OauthProxy d.Spec.Template.Spec.Containers[THANOS_QUERIER_CONTAINER_KUBE_RBAC_PROXY].Image = f.config.Images.KubeRbacProxy d.Spec.Template.Spec.Containers[THANOS_QUERIER_CONTAINER_PROM_LABEL_PROXY].Image = f.config.Images.PromLabelProxy if enableUserWorkloadMonitoring { d.Spec.Template.Spec.Containers[THANOS_QUERIER_CONTAINER_THANOS].Args = append( d.Spec.Template.Spec.Containers[THANOS_QUERIER_CONTAINER_THANOS].Args, "--store=dnssrv+_grpc._tcp.prometheus-operated.openshift-user-workload-monitoring.svc.cluster.local", "--store=dnssrv+_grpc._tcp.thanos-ruler-operated.openshift-user-workload-monitoring.svc.cluster.local", "--rule=dnssrv+_grpc._tcp.prometheus-operated.openshift-user-workload-monitoring.svc.cluster.local", "--rule=dnssrv+_grpc._tcp.thanos-ruler-operated.openshift-user-workload-monitoring.svc.cluster.local", ) } d.Spec.Template.Spec.Volumes = append(d.Spec.Template.Spec.Volumes, v1.Volume{ Name: "secret-grpc-tls", VolumeSource: v1.VolumeSource{ Secret: &v1.SecretVolumeSource{ SecretName: grpcTLS.GetName(), }, }, }) if trustedCA != nil { volumeName := "thanos-querier-trusted-ca-bundle" d.Spec.Template.Spec.Containers[THANOS_QUERIER_CONTAINER_OAUTH_PROXY].VolumeMounts = append( d.Spec.Template.Spec.Containers[THANOS_QUERIER_CONTAINER_OAUTH_PROXY].VolumeMounts, 
trustedCABundleVolumeMount(volumeName), ) volume := trustedCABundleVolume(trustedCA.Name, volumeName) volume.VolumeSource.ConfigMap.Items = append(volume.VolumeSource.ConfigMap.Items, v1.KeyToPath{ Key: TrustedCABundleKey, Path: "tls-ca-bundle.pem", }) d.Spec.Template.Spec.Volumes = append(d.Spec.Template.Spec.Volumes, volume) } if f.config.ClusterMonitoringConfiguration.ThanosQuerierConfig.Resources != nil { for i, c := range d.Spec.Template.Spec.Containers { if c.Name == "thanos-query" { d.Spec.Template.Spec.Containers[i].Resources = *f.config.ClusterMonitoringConfiguration.ThanosQuerierConfig.Resources } } } if f.config.ClusterMonitoringConfiguration.ThanosQuerierConfig.NodeSelector != nil { d.Spec.Template.Spec.NodeSelector = f.config.ClusterMonitoringConfiguration.ThanosQuerierConfig.NodeSelector } if len(f.config.ClusterMonitoringConfiguration.ThanosQuerierConfig.Tolerations) > 0 { d.Spec.Template.Spec.Tolerations = f.config.ClusterMonitoringConfiguration.ThanosQuerierConfig.Tolerations } return d, nil } func (f *Factory) ThanosQuerierTrustedCABundle() (*v1.ConfigMap, error) { cm, err := f.NewConfigMap(MustAssetReader(ThanosQuerierTrustedCABundle)) if err != nil { return nil, err } return cm, nil } func (f *Factory) ThanosQuerierService() (*v1.Service, error) { s, err := f.NewService(MustAssetReader(ThanosQuerierService)) if err != nil { return nil, err } s.Namespace = f.namespace return s, nil } func (f *Factory) ThanosQuerierPrometheusRule() (*monv1.PrometheusRule, error) { return f.NewPrometheusRule(MustAssetReader(ThanosQuerierPrometheusRule)) } func (f *Factory) ThanosQuerierServiceMonitor() (*monv1.ServiceMonitor, error) { sm, err := f.NewServiceMonitor(MustAssetReader(ThanosQuerierServiceMonitor))
return nil, err } var found bool const endpointPort = "web" for i := range sm.Spec.Endpoints { if sm.Spec.Endpoints[i].Port == endpointPort { found = true sm.Spec.Endpoints[i].TLSConfig.ServerName = fmt.Sprintf("thanos-querier.%s.svc", f.namespace) } } if !found { return nil, errors.Errorf("failed to find endpoint port %q", endpointPort) } sm.Namespace = f.namespace return sm, nil } func (f *Factory) TelemeterTrustedCABundle() (*v1.ConfigMap, error) { cm, err := f.NewConfigMap(MustAssetReader(TelemeterTrustedCABundle)) if err != nil { return nil, err } return cm, nil } // TelemeterClientServingCertsCABundle generates a new serving certs CA bundle ConfigMap for TelemeterClient. func (f *Factory) TelemeterClientServingCertsCABundle() (*v1.ConfigMap, error) { c, err := f.NewConfigMap(MustAssetReader(TelemeterClientServingCertsCABundle)) if err != nil { return nil, err } c.Namespace = f.namespace return c, nil } // TelemeterClientClusterRole generates a new ClusterRole for Telemeter client. func (f *Factory) TelemeterClientClusterRole() (*rbacv1.ClusterRole, error) { cr, err := f.NewClusterRole(MustAssetReader(TelemeterClientClusterRole)) if err != nil { return nil, err } return cr, nil } // TelemeterClientClusterRoleBinding generates a new ClusterRoleBinding for Telemeter client. func (f *Factory) TelemeterClientClusterRoleBinding() (*rbacv1.ClusterRoleBinding, error) { crb, err := f.NewClusterRoleBinding(MustAssetReader(TelemeterClientClusterRoleBinding)) if err != nil { return nil, err } return crb, nil } // TelemeterClientClusterRoleBindingView generates a new ClusterRoleBinding for Telemeter client // for the cluster monitoring view ClusterRole. func (f *Factory) TelemeterClientClusterRoleBindingView() (*rbacv1.ClusterRoleBinding, error) { crb, err := f.NewClusterRoleBinding(MustAssetReader(TelemeterClientClusterRoleBindingView)) if err != nil { return nil, err } return crb, nil } // TelemeterClientServiceMonitor generates a new ServiceMonitor for Telemeter client. func (f *Factory) TelemeterClientServiceMonitor() (*monv1.ServiceMonitor, error) { sm, err := f.NewServiceMonitor(MustAssetReader(TelemeterClientServiceMonitor)) if err != nil { return nil, err } sm.Spec.Endpoints[0].TLSConfig.ServerName = fmt.Sprintf("telemeter-client.%s.svc", f.namespace) sm.Namespace = f.namespace return sm, nil } // TelemeterClientDeployment generates a new Deployment for Telemeter client. // If the passed ConfigMap is not empty it mounts the Trusted CA Bundle as a VolumeMount to // /etc/pki/ca-trust/extracted/pem/ location.
func (f *Factory) TelemeterClientDeployment(proxyCABundleCM *v1.ConfigMap) (*appsv1.Deployment, error) { d, err := f.NewDeployment(MustAssetReader(TelemeterClientDeployment)) if err != nil { return nil, err } setEnv := func(name, value string) { for i := range d.Spec.Template.Spec.Containers[0].Env { if d.Spec.Template.Spec.Containers[0].Env[i].Name == name { d.Spec.Template.Spec.Containers[0].Env[i].Value = value break } } } if f.config.ClusterMonitoringConfiguration.TelemeterClientConfig.ClusterID != "" { setEnv("ID", f.config.ClusterMonitoringConfiguration.TelemeterClientConfig.ClusterID) } if f.config.ClusterMonitoringConfiguration.TelemeterClientConfig.TelemeterServerURL != "" { setEnv("TO", f.config.ClusterMonitoringConfiguration.TelemeterClientConfig.TelemeterServerURL) } if f.config.ClusterMonitoringConfiguration.HTTPConfig.HTTPProxy != "" { setEnv("HTTP_PROXY", f.config.ClusterMonitoringConfiguration.HTTPConfig.HTTPProxy) } if f.config.ClusterMonitoringConfiguration.HTTPConfig.HTTPSProxy != "" { setEnv("HTTPS_PROXY", f.config.ClusterMonitoringConfiguration.HTTPConfig.HTTPSProxy) } if f.config.ClusterMonitoringConfiguration.HTTPConfig.NoProxy != "" { setEnv("NO_PROXY", f.config.ClusterMonitoringConfiguration.HTTPConfig.NoProxy) } d.Spec.Template.Spec.Containers[0].Image = f.config.Images.TelemeterClient d.Spec.Template.Spec.Containers[1].Image = f.config.Images.ConfigmapReloader d.Spec.Template.Spec.Containers[2].Image = f.config.Images.KubeRbacProxy cmd := []string{} for _, a := range d.Spec.Template.Spec.Containers[0].Command { if !strings.HasPrefix(a, "--match=") { cmd = append(cmd, a) } } for _, m := range f.config.ClusterMonitoringConfiguration.PrometheusK8sConfig.TelemetryMatches { cmd = append(cmd, fmt.Sprintf("--match=%s", m)) } cmd = append(cmd, "--limit-bytes=5242880") d.Spec.Template.Spec.Containers[0].Command = cmd if len(f.config.ClusterMonitoringConfiguration.TelemeterClientConfig.NodeSelector) > 0 { d.Spec.Template.Spec.NodeSelector = f.config.ClusterMonitoringConfiguration.TelemeterClientConfig.NodeSelector } if len(f.config.ClusterMonitoringConfiguration.TelemeterClientConfig.Tolerations) > 0 { d.Spec.Template.Spec.Tolerations = f.config.ClusterMonitoringConfiguration.TelemeterClientConfig.Tolerations } d.Namespace = f.namespace if proxyCABundleCM != nil { volumeName := "telemeter-trusted-ca-bundle" d.Spec.Template.Spec.Containers[0].VolumeMounts = append(d.Spec.Template.Spec.Containers[0].VolumeMounts, trustedCABundleVolumeMount(volumeName)) volume := trustedCABundleVolume(proxyCABundleCM.Name, volumeName) volume.VolumeSource.ConfigMap.Items = append(volume.VolumeSource.ConfigMap.Items, v1.KeyToPath{ Key: TrustedCABundleKey, Path: "tls-ca-bundle.pem", }) d.Spec.Template.Spec.Volumes = append(d.Spec.Template.Spec.Volumes, volume) } return d, nil } // TelemeterClientService generates a new Service for Telemeter client. func (f *Factory) TelemeterClientService() (*v1.Service, error) { s, err := f.NewService(MustAssetReader(TelemeterClientService)) if err != nil { return nil, err } s.Namespace = f.namespace return s, nil } // TelemeterClientServiceAccount generates a new ServiceAccount for Telemeter client. func (f *Factory) TelemeterClientServiceAccount() (*v1.ServiceAccount, error) { s, err := f.NewServiceAccount(MustAssetReader(TelemeterClientServiceAccount)) if err != nil { return nil, err } s.Namespace = f.namespace return s, nil } // TelemeterClientSecret generates a new Secret for Telemeter client. 
func (f *Factory) TelemeterClientSecret() (*v1.Secret, error) { s, err := f.NewSecret(MustAssetReader(TelemeterClientSecret)) if err != nil { return nil, err } salt, err := GeneratePassword(32) if err != nil { return nil, fmt.Errorf("failed to generate Telemeter client salt: %v", err) } s.Data["salt"] = []byte(salt) if f.config.ClusterMonitoringConfiguration.TelemeterClientConfig.Token != "" { s.Data["token"] = []byte(f.config.ClusterMonitoringConfiguration.TelemeterClientConfig.Token) } s.Namespace = f.namespace return s, nil } func (f *Factory) ThanosRulerService() (*v1.Service, error) { s, err := f.NewService(MustAssetReader(ThanosRulerService)) if err != nil { return nil, err } s.Namespace = f.namespaceUserWorkload return s, nil } func (f *Factory) ThanosRulerServiceAccount() (*v1.ServiceAccount, error) { s, err := f.NewServiceAccount(MustAssetReader(ThanosRulerServiceAccount)) if err != nil { return nil, err } s.Namespace = f.namespaceUserWorkload return s, nil } func (f *Factory) ThanosRulerClusterRoleBinding() (*rbacv1.ClusterRoleBinding, error) { crb, err := f.NewClusterRoleBinding(MustAssetReader(ThanosRulerClusterRoleBinding)) if err != nil { return nil, err } crb.Subjects[0].Namespace = f.namespaceUserWorkload return crb, nil } func (f *Factory) ThanosRulerMonitoringClusterRoleBinding() (*rbacv1.ClusterRoleBinding, error) { crb, err := f.NewClusterRoleBinding(MustAssetReader(ThanosRulerMonitoringClusterRoleBinding)) if err != nil { return nil, err } crb.Subjects[0].Namespace = f.namespaceUserWorkload return crb, nil } func (f *Factory) ThanosRulerClusterRole() (*rbacv1.ClusterRole, error) { return f.NewClusterRole(MustAssetReader(ThanosRulerClusterRole)) } func (f *Factory) ThanosRulerPrometheusRule() (*monv1.PrometheusRule, error) { return f.NewPrometheusRule(MustAssetReader(ThanosRulerPrometheusRule)) } func (f *Factory) ThanosRulerServiceMonitor() (*monv1.ServiceMonitor, error) { sm, err := f.NewServiceMonitor(MustAssetReader(ThanosRulerServiceMonitor)) if err != nil { return nil, err } sm.Spec.Endpoints[0].TLSConfig.ServerName = fmt.Sprintf("thanos-ruler.%s.svc", f.namespaceUserWorkload) sm.Namespace = f.namespaceUserWorkload return sm, nil } func (f *Factory) ThanosRulerRoute() (*routev1.Route, error) { r, err := f.NewRoute(MustAssetReader(ThanosRulerRoute)) if err != nil { return nil, err } r.Namespace = f.namespaceUserWorkload return r, nil } func (f *Factory) ThanosRulerTrustedCABundle() (*v1.ConfigMap, error) { cm, err := f.NewConfigMap(MustAssetReader(ThanosRulerTrustedCABundle)) if err != nil { return nil, err } return cm, nil } func (f *Factory) ThanosRulerGrpcTLSSecret() (*v1.Secret, error) { s, err := f.NewSecret(MustAssetReader(ThanosRulerGrpcTLSSecret)) if err != nil { return nil, err } s.Namespace = f.namespaceUserWorkload return s, nil } func (f *Factory) ThanosRulerOauthCookieSecret() (*v1.Secret, error) { s, err := f.NewSecret(MustAssetReader(ThanosRulerOauthCookieSecret)) if err != nil { return nil, err } p, err := GeneratePassword(43) if err != nil { return nil, err } s.Data["session_secret"] = []byte(p) s.Namespace = f.namespaceUserWorkload return s, nil } func (f *Factory) ThanosRulerCustomResource(queryURL string, trustedCA *v1.ConfigMap, grpcTLS *v1.Secret) (*monv1.ThanosRuler, error) { t, err := f.NewThanosRuler(MustAssetReader(ThanosRulerCustomResource)) if err != nil { return nil, err } t.Spec.Image = f.config.Images.Thanos if f.config.UserWorkloadConfiguration.ThanosRuler.LogLevel != "" { t.Spec.LogLevel = 
f.config.UserWorkloadConfiguration.ThanosRuler.LogLevel } if f.config.UserWorkloadConfiguration.ThanosRuler.Resources != nil { t.Spec.Resources = *f.config.UserWorkloadConfiguration.ThanosRuler.Resources } if f.config.UserWorkloadConfiguration.ThanosRuler.VolumeClaimTemplate != nil { t.Spec.Storage = &monv1.StorageSpec{ VolumeClaimTemplate: *f.config.UserWorkloadConfiguration.ThanosRuler.VolumeClaimTemplate, } } if f.config.UserWorkloadConfiguration.ThanosRuler.NodeSelector != nil { t.Spec.NodeSelector = f.config.UserWorkloadConfiguration.ThanosRuler.NodeSelector } if len(f.config.UserWorkloadConfiguration.ThanosRuler.Tolerations) > 0 { t.Spec.Tolerations = f.config.UserWorkloadConfiguration.ThanosRuler.Tolerations } // TODO: remove in 4.7 if f.config.ClusterMonitoringConfiguration.ThanosRulerConfig.LogLevel != "" { t.Spec.LogLevel = f.config.ClusterMonitoringConfiguration.ThanosRulerConfig.LogLevel } if f.config.ClusterMonitoringConfiguration.ThanosRulerConfig.Resources != nil { t.Spec.Resources = *f.config.ClusterMonitoringConfiguration.ThanosRulerConfig.Resources } if f.config.ClusterMonitoringConfiguration.ThanosRulerConfig.VolumeClaimTemplate != nil { t.Spec.Storage = &monv1.StorageSpec{ VolumeClaimTemplate: *f.config.ClusterMonitoringConfiguration.ThanosRulerConfig.VolumeClaimTemplate, } } if f.config.ClusterMonitoringConfiguration.ThanosRulerConfig.NodeSelector != nil { t.Spec.NodeSelector = f.config.ClusterMonitoringConfiguration.ThanosRulerConfig.NodeSelector } if len(f.config.ClusterMonitoringConfiguration.ThanosRulerConfig.Tolerations) > 0 { t.Spec.Tolerations = f.config.ClusterMonitoringConfiguration.ThanosRulerConfig.Tolerations } // end of remove t.Spec.Containers[1].Image = f.config.Images.OauthProxy setEnv := func(name, value string) { for i := range t.Spec.Containers[1].Env { if t.Spec.Containers[1].Env[i].Name == name { t.Spec.Containers[1].Env[i].Value = value break } } } if f.config.ClusterMonitoringConfiguration.HTTPConfig.HTTPProxy != "" { setEnv("HTTP_PROXY", f.config.ClusterMonitoringConfiguration.HTTPConfig.HTTPProxy) } if f.config.ClusterMonitoringConfiguration.HTTPConfig.HTTPSProxy != "" { setEnv("HTTPS_PROXY", f.config.ClusterMonitoringConfiguration.HTTPConfig.HTTPSProxy) } if f.config.ClusterMonitoringConfiguration.HTTPConfig.NoProxy != "" { setEnv("NO_PROXY", f.config.ClusterMonitoringConfiguration.HTTPConfig.NoProxy) } // Mounting TLS secret to thanos-ruler if grpcTLS == nil { return nil, errors.New("could not generate thanos ruler CRD: GRPC TLS secret was not found") } secretName := "secret-grpc-tls" secretVolume := v1.Volume{ Name: secretName, VolumeSource: v1.VolumeSource{ Secret: &v1.SecretVolumeSource{ SecretName: grpcTLS.GetName(), }, }, } t.Spec.Volumes = append(t.Spec.Volumes, secretVolume) if trustedCA != nil { volumeName := "thanos-ruler-trusted-ca-bundle" t.Spec.Containers[1].VolumeMounts = append( t.Spec.Containers[1].VolumeMounts, trustedCABundleVolumeMount(volumeName), ) volume := trustedCABundleVolume(trustedCA.Name, volumeName) volume.VolumeSource.ConfigMap.Items = append(volume.VolumeSource.ConfigMap.Items, v1.KeyToPath{ Key: TrustedCABundleKey, Path: "tls-ca-bundle.pem", }) t.Spec.Volumes = append(t.Spec.Volumes, volume) } if queryURL != "" { t.Spec.AlertQueryURL = queryURL } t.Namespace = f.namespaceUserWorkload return t, nil } func NewDaemonSet(manifest io.Reader) (*appsv1.DaemonSet, error) { ds := appsv1.DaemonSet{} err := yaml.NewYAMLOrJSONDecoder(manifest, 100).Decode(&ds) if err != nil { return nil, err } return &ds, nil } func 
NewService(manifest io.Reader) (*v1.Service, error) { s := v1.Service{} err := yaml.NewYAMLOrJSONDecoder(manifest, 100).Decode(&s) if err != nil { return nil, err } return &s, nil } func NewEndpoints(manifest io.Reader) (*v1.Endpoints, error) { e := v1.Endpoints{} err := yaml.NewYAMLOrJSONDecoder(manifest, 100).Decode(&e) if err != nil { return nil, err } return &e, nil } func NewRoute(manifest io.Reader) (*routev1.Route, error) { r := routev1.Route{} err := yaml.NewYAMLOrJSONDecoder(manifest, 100).Decode(&r) if err != nil { return nil, err } return &r, nil } func NewSecret(manifest io.Reader) (*v1.Secret, error) { s := v1.Secret{} err := yaml.NewYAMLOrJSONDecoder(manifest, 100).Decode(&s) if err != nil { return nil, err } return &s, nil } func NewClusterRoleBinding(manifest io.Reader) (*rbacv1.ClusterRoleBinding, error) { crb := rbacv1.ClusterRoleBinding{} err := yaml.NewYAMLOrJSONDecoder(manifest, 100).Decode(&crb) if err != nil { return nil, err } return &crb, nil } func NewClusterRole(manifest io.Reader) (*rbacv1.ClusterRole, error) { cr := rbacv1.ClusterRole{} err := yaml.NewYAMLOrJSONDecoder(manifest, 100).Decode(&cr) if err != nil { return nil, err } return &cr, nil } func NewRoleBinding(manifest io.Reader) (*rbacv1.RoleBinding, error) { rb := rbacv1.RoleBinding{} err := yaml.NewYAMLOrJSONDecoder(manifest, 100).Decode(&rb) if err != nil { return nil, err } return &rb, nil } func NewRole(manifest io.Reader) (*rbacv1.Role, error) { r := rbacv1.Role{} err := yaml.NewYAMLOrJSONDecoder(manifest, 100).Decode(&r) if err != nil { return nil, err } return &r, nil } func NewRoleBindingList(manifest io.Reader) (*rbacv1.RoleBindingList, error) { rbl := rbacv1.RoleBindingList{} err := yaml.NewYAMLOrJSONDecoder(manifest, 100).Decode(&rbl) if err != nil { return nil, err } return &rbl, nil } func NewRoleList(manifest io.Reader) (*rbacv1.RoleList, error) { rl := rbacv1.RoleList{} err := yaml.NewYAMLOrJSONDecoder(manifest, 100).Decode(&rl) if err != nil { return nil, err } return &rl, nil } func NewConfigMap(manifest io.Reader) (*v1.ConfigMap, error) { cm := v1.ConfigMap{} err := yaml.NewYAMLOrJSONDecoder(manifest, 100).Decode(&cm) if err != nil { return nil, err } return &cm, nil } func NewConfigMapList(manifest io.Reader) (*v1.ConfigMapList, error) { cml := v1.ConfigMapList{} err := yaml.NewYAMLOrJSONDecoder(manifest, 100).Decode(&cml) if err != nil { return nil, err } return &cml, nil } func NewServiceAccount(manifest io.Reader) (*v1.ServiceAccount, error) { sa := v1.ServiceAccount{} err := yaml.NewYAMLOrJSONDecoder(manifest, 100).Decode(&sa) if err != nil { return nil, err } return &sa, nil } func NewPrometheus(manifest io.Reader) (*monv1.Prometheus, error) { p := monv1.Prometheus{} err := yaml.NewYAMLOrJSONDecoder(manifest, 100).Decode(&p) if err != nil { return nil, err } return &p, nil } func NewPrometheusRule(manifest io.Reader) (*monv1.PrometheusRule, error) { p := monv1.PrometheusRule{} err := yaml.NewYAMLOrJSONDecoder(manifest, 100).Decode(&p) if err != nil { return nil, err } return &p, nil } func NewAlertmanager(manifest io.Reader) (*monv1.Alertmanager, error) { a := monv1.Alertmanager{} err := yaml.NewYAMLOrJSONDecoder(manifest, 100).Decode(&a) if err != nil { return nil, err } return &a, nil } func NewThanosRuler(manifest io.Reader) (*monv1.ThanosRuler, error) { t := monv1.ThanosRuler{} err := yaml.NewYAMLOrJSONDecoder(manifest, 100).Decode(&t) if err != nil { return nil, err } return &t, nil } func NewServiceMonitor(manifest io.Reader) (*monv1.ServiceMonitor, error) { sm := 
monv1.ServiceMonitor{} err := yaml.NewYAMLOrJSONDecoder(manifest, 100).Decode(&sm) if err != nil { return nil, err } return &sm, nil } func NewDeployment(manifest io.Reader) (*appsv1.Deployment, error) { d := appsv1.Deployment{} err := yaml.NewYAMLOrJSONDecoder(manifest, 100).Decode(&d) if err != nil { return nil, err } return &d, nil } func NewIngress(manifest io.Reader) (*v1beta1.Ingress, error) { i := v1beta1.Ingress{} err := yaml.NewYAMLOrJSONDecoder(manifest, 100).Decode(&i) if err != nil { return nil, err } return &i, nil } func NewAPIService(manifest io.Reader) (*apiregistrationv1beta1.APIService, error) { s := apiregistrationv1beta1.APIService{} err := yaml.NewYAMLOrJSONDecoder(manifest, 100).Decode(&s) if err != nil { return nil, err } return &s, nil } func NewSecurityContextConstraints(manifest io.Reader) (*securityv1.SecurityContextConstraints, error) { s := securityv1.SecurityContextConstraints{} err := yaml.NewYAMLOrJSONDecoder(manifest, 100).Decode(&s) if err != nil { return nil, err } return &s, nil } func NewValidatingWebhook(manifest io.Reader) (*admissionv1.ValidatingWebhookConfiguration, error) { v := admissionv1.ValidatingWebhookConfiguration{} err := yaml.NewYAMLOrJSONDecoder(manifest, 100).Decode(&v) if err != nil { return nil, err } return &v, nil } // HashTrustedCA synthesizes a configmap just by copying "ca-bundle.crt" from the given configmap // and naming it by hashing the contents of "ca-bundle.crt". // It adds "monitoring.openshift.io/name" and "monitoring.openshift.io/hash" labels. // Any other labels from the given configmap are discarded. // // It returns an error if the given configmap does not contain the "ca-bundle.crt" data key // or data is empty string. func (f *Factory) HashTrustedCA(caBundleCM *v1.ConfigMap, prefix string) (*v1.ConfigMap, error) { caBundle, ok := caBundleCM.Data[TrustedCABundleKey] if !ok { return nil, errors.Errorf("CA bundle key %q missing", TrustedCABundleKey) } if caBundle == "" { return nil, errors.Errorf("CA bundle key %q empty", TrustedCABundleKey) } h := fnv.New64() h.Write([]byte(caBundle)) hash := strconv.FormatUint(h.Sum64(), 32) ns := f.namespace if caBundleCM.ObjectMeta.Namespace != "" { ns = caBundleCM.ObjectMeta.Namespace } return &v1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Namespace: ns, Name: fmt.Sprintf("%s-trusted-ca-bundle-%s", prefix, hash), Labels: map[string]string{ "monitoring.openshift.io/name": prefix, "monitoring.openshift.io/hash": hash, }, }, Data: map[string]string{ TrustedCABundleKey: caBundle, }, }, nil } // HashSecret synthesizes a secret by setting the given data // and naming it by hashing the values of the given data. // // For simplicity, data is expected to be given in a key-value format, // i.e. HashSecret(someSecret, value1, key1, value2, key2, ...). // // It adds "monitoring.openshift.io/name" and "monitoring.openshift.io/hash" labels. // Any other labels from the given secret are discarded. // // It still returns a secret if the given secret does not contain any data. 
func (f *Factory) HashSecret(secret *v1.Secret, data ...string) (*v1.Secret, error) { h := fnv.New64() m := make(map[string][]byte) var err error for i := 0; i < len(data)/2; i++ { k := data[i*2] v := []byte(data[i*2+1]) _, err = h.Write(v) m[k] = v } if err != nil { return nil, errors.Wrap(err, "error hashing tls data") } hash := strconv.FormatUint(h.Sum64(), 32) return &v1.Secret{ ObjectMeta: metav1.ObjectMeta{ Namespace: secret.GetNamespace(), Name: fmt.Sprintf("%s-%s", secret.GetName(), hash), Labels: map[string]string{ "monitoring.openshift.io/name": secret.GetName(), "monitoring.openshift.io/hash": hash, }, }, Data: m, }, nil } func trustedCABundleVolumeMount(name string) v1.VolumeMount { return v1.VolumeMount{ Name: name, ReadOnly: true, MountPath: "/etc/pki/ca-trust/extracted/pem/", } } func trustedCABundleVolume(configMapName, volumeName string) v1.Volume { yes := true return v1.Volume{ Name: volumeName, VolumeSource: v1.VolumeSource{ ConfigMap: &v1.ConfigMapVolumeSource{ LocalObjectReference: v1.LocalObjectReference{ Name: configMapName, }, Optional: &yes, }, }, } }
if err != nil {
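A note on the HashTrustedCA and HashSecret helpers in the record above: both derive the output object's name from its content, hashing with 64-bit FNV and rendering the sum in base 32, so any change to the CA bundle or secret data yields a new name and hash label, and consumers roll to the new object rather than mutating in place. A minimal sketch of that naming scheme in Python (the helper names here are illustrative; only the FNV-1 parameters and Go's base-32 digit set are taken as known):

# Sketch of the content-hash naming used by HashTrustedCA/HashSecret above.
# FNV-1 64-bit, matching Go's fnv.New64(); strconv.FormatUint(x, 32) uses digits 0-9a-v.
FNV64_OFFSET = 0xCBF29CE484222325
FNV64_PRIME = 0x100000001B3
DIGITS = "0123456789abcdefghijklmnopqrstuv"

def fnv1_64(data: bytes) -> int:
    h = FNV64_OFFSET
    for b in data:
        h = (h * FNV64_PRIME) & 0xFFFFFFFFFFFFFFFF  # multiply then xor: FNV-1, not FNV-1a
        h ^= b
    return h

def base32_digits(n: int) -> str:
    if n == 0:
        return "0"
    out = []
    while n:
        n, r = divmod(n, 32)
        out.append(DIGITS[r])
    return "".join(reversed(out))

ca_bundle = "-----BEGIN CERTIFICATE-----..."  # illustrative payload
hash_str = base32_digits(fnv1_64(ca_bundle.encode()))
# yields names like "grafana-trusted-ca-bundle-<hash>",
# labelled monitoring.openshift.io/hash=<hash>
name = f"grafana-trusted-ca-bundle-{hash_str}"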
auth.ts
import { getApiPath } from './common'

export const auth = {
  login: (includeSurvey?: string): string =>
    `${getApiPath('auth', 'login')}${includeSurvey ? '?includeSurvey=true' : ''}`,
}
18.d11fa765bbbff13c6e10.chunk.js
webpackJsonp([18,19],{752:function(e,n,t){"use strict";function
(e,n,t){return n in e?Object.defineProperty(e,n,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[n]=t,e}function o(e,n){if(!(e instanceof n))throw new TypeError("Cannot call a class as a function")}function a(e,n){if(!e)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!n||"object"!=typeof n&&"function"!=typeof n?e:n}function i(e,n){if("function"!=typeof n&&null!==n)throw new TypeError("Super expression must either be null or a function, not "+typeof n);e.prototype=Object.create(n&&n.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}}),n&&(Object.setPrototypeOf?Object.setPrototypeOf(e,n):e.__proto__=n)}var c=t(8),l=c&&c.__esModule?function(){return c["default"]}:function(){return c};t.d(l,"a",l);var u=function(){var e="function"==typeof Symbol&&Symbol["for"]&&Symbol["for"]("react.element")||60103;return function(n,t,r,o){var a=n&&n.defaultProps,i=arguments.length-3;if(t||0===i||(t={}),t&&a)for(var c in a)void 0===t[c]&&(t[c]=a[c]);else t||(t=a||{});if(1===i)t.children=o;else if(i>1){for(var l=Array(i),u=0;u<i;u++)l[u]=arguments[u+3];t.children=l}return{$$typeof:e,type:n,key:void 0===r?null:""+r,ref:null,props:t,_owner:null}}}(),f=function(){function e(e,n){for(var t=0;t<n.length;t++){var r=n[t];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(e,r.key,r)}}return function(n,t,r){return t&&e(n.prototype,t),r&&e(n,r),n}}(),s=function(e){function n(e){o(this,n);var t=a(this,(n.__proto__||Object.getPrototypeOf(n)).call(this,e));return t.state={clara:null},t}return i(n,e),f(n,[{key:"componentDidMount",value:function(){var e=this,n=claraplayer(this.refs.claraplayer);n.sceneIO.fetchAndUse("0cd9a9a2-c113-4b0d-ae91-baa86d3ded4a").then(function(){return e.setState({clara:n})}),["orbit","pan","zoom","home","fullscreen"].forEach(function(e){n.player.hideTool(e)})}},{key:"componentDidUpdate",value:function(){this.state.clara.player.resize()}},{key:"importScene",value:function(e){var n=this.state.clara,t=0;n.sceneIO.fetch(e).then(function(){var o=n.scene.find({from:{id:e},name:"Objects"}),a=n.scene.filter({from:{id:o},type:["PolyMesh","BinMesh","Null"]}),i=n.scene.find({type:"Objects"});n.sceneGraph.addNode({name:"Import Null",parent:i,type:"Null",plugs:{Transform:[["Transform",{translation:{x:0,y:(t+1)/2,z:0}}]]}}).then(function(e){n.sceneGraph.clone(a,r({},o,e)).then(function(e){console.log("new nodes",e)})})})}},{key:"render",value:function(){var e=this;return u("div",{},void 0,l.a.createElement("div",{ref:"claraplayer",style:{width:800,height:600}}),u("div",{},void 0,u("button",{onClick:function(n){return e.importScene("7f92faad-1cd0-4e75-b76c-c0c564e809f2")}},void 0,"import")))}}]),n}(c.Component);n["default"]=s}});
r
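A reading note on the dump's layout, inferred from the visible records: each record lists its columns as file_name, prefix, suffix, middle, so the code reads out of file order, and the minified chunk above makes the split easy to see: the prefix ends at `function`, the middle is just the identifier `r`, and the suffix carries on with `(e,n,t){...`. A small Python sketch of how a record reassembles into the original file (the column order is an assumption read off these records):

def reassemble(prefix: str, middle: str, suffix: str) -> str:
    # The file order is prefix + middle + suffix; the dump lists the
    # columns as prefix, suffix, middle, so they appear out of order here.
    return prefix + middle + suffix

# Mirroring the release_test.go record further down: the prefix ends at a
# function signature, the middle supplies the body, the suffix continues.
prefix = "func TestGetContainerNameRelease(t *testing.T) "
middle = "{ /* body */ }"
suffix = "\n\nfunc TestGetFullyQualifiedNameRelease(t *testing.T) { /* ... */ }"
print(reassemble(prefix, middle, suffix))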
release_test.go
package liferay

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestDeployFolderRelease(t *testing.T) {
	release := Release{}
	assert := assert.New(t)
	assert.Equal(release.GetLiferayHome()+"/deploy", release.GetDeployFolder())
}

func TestGetContainerNameRelease(t *testing.T)
func TestGetFullyQualifiedNameRelease(t *testing.T) {
	release := Release{Tag: "foo"}
	assert := assert.New(t)
	assert.Equal("mdelapenya/liferay-portal:foo", release.GetFullyQualifiedName())
}

func TestGetLiferayHomeReleaseLatest(t *testing.T) {
	release := Release{Tag: "latest"}
	assert := assert.New(t)
	assert.Equal("/liferay", release.GetLiferayHome())
}

func TestGetLiferayHomeRelease7_1M1(t *testing.T) {
	release := Release{Tag: "7.1-ce-m1-tomcat-hsql"}
	assert := assert.New(t)
	assert.Equal("/liferay", release.GetLiferayHome())
}

func TestGetLiferayHomeRelease7Ga5(t *testing.T) {
	testGetLiferayHomeRelease7Ga(t, "5")
}

func TestGetLiferayHomeRelease7Ga4(t *testing.T) {
	testGetLiferayHomeRelease7Ga(t, "4")
}

func TestGetLiferayHomeRelease7Ga3(t *testing.T) {
	testGetLiferayHomeRelease7Ga(t, "3")
}

func TestGetLiferayHomeRelease7Ga2(t *testing.T) {
	testGetLiferayHomeRelease7Ga(t, "2")
}

func TestGetLiferayHomeRelease7Ga1(t *testing.T) {
	testGetLiferayHomeRelease7Ga(t, "1")
}

func TestGetLiferayHomeRelease6_2Ga6(t *testing.T) {
	release := Release{Tag: "6.2-ce-ga6-tomcat-hsql"}
	assert := assert.New(t)
	assert.Equal("/usr/local/liferay-portal-6.2-ce-ga1", release.GetLiferayHome())
}

func TestGetLiferayHomeRelease6_1Ga1(t *testing.T) {
	release := Release{Tag: "6.1-ce-ga1-tomcat-hsql"}
	assert := assert.New(t)
	assert.Equal("/usr/local/liferay-portal-6.1.0-ce-ga1", release.GetLiferayHome())
}

func TestGetLiferayHomeReleaseNoTag(t *testing.T) {
	release := Release{}
	assert := assert.New(t)
	assert.Equal("/liferay", release.GetLiferayHome())
}

func TestGetReleasesRepository(t *testing.T) {
	release := Release{}
	assert := assert.New(t)

	releases := release.GetRepository()
	assert.Equal("mdelapenya/liferay-portal", releases)
}

func testGetLiferayHomeRelease7Ga(t *testing.T, ga string) {
	release := Release{Tag: "7-ce-ga" + ga + "-tomcat-hsql"}
	assert := assert.New(t)
	assert.Equal("/usr/local/liferay-ce-portal-7.0-ga"+ga, release.GetLiferayHome())
}

func TestGetTypeRelease(t *testing.T) {
	release := Release{}
	assert := assert.New(t)
	assert.Equal("release", release.GetType())
}
{
	release := Release{}
	assert := assert.New(t)
	assert.Equal("lpn-release", release.GetContainerName())
}
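The assertions above fully pin down the expected tag-to-LIFERAY_HOME mapping even though GetLiferayHome's implementation is not part of this record: 7.x GA tags map under /usr/local/liferay-ce-portal-7.0-gaN, 6.2 and 6.1 tags to fixed /usr/local paths, and latest, 7.1 milestones, and the empty tag to /liferay. A sketch of logic consistent with those expectations, in Python rather than the Go under test (liferay_home is an illustrative stand-in, not the real implementation):

def liferay_home(tag: str) -> str:
    # Mapping reconstructed from the assertions in release_test.go above.
    if tag.startswith("6.2-ce-"):
        return "/usr/local/liferay-portal-6.2-ce-ga1"   # ga1 path even for ga6, per the test
    if tag.startswith("6.1-ce-"):
        return "/usr/local/liferay-portal-6.1.0-ce-ga1"
    if tag.startswith("7-ce-ga"):
        ga = tag[len("7-ce-ga")]  # single digit in the tags the tests exercise
        return "/usr/local/liferay-ce-portal-7.0-ga" + ga
    return "/liferay"  # latest, 7.1 milestones, and the empty tag

assert liferay_home("7-ce-ga5-tomcat-hsql") == "/usr/local/liferay-ce-portal-7.0-ga5"
assert liferay_home("") == "/liferay"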
run_squad_trainer.py
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Fine-tuning the library models for question-answering."""


import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

import transformers
from transformers import AutoConfig, AutoModelForQuestionAnswering, AutoTokenizer, HfArgumentParser, SquadDataset
from transformers import SquadDataTrainingArguments as DataTrainingArguments
from transformers import Trainer, TrainingArguments
from transformers.trainer_utils import is_main_process


logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Prepare Question-Answering task
    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=False,  # SquadDataset is not compatible with Fast tokenizers which have a smarter overflow handling
    )
    model = AutoModelForQuestionAnswering.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    is_language_sensitive = hasattr(model.config, "lang2id")
    train_dataset = (
        SquadDataset(
            data_args, tokenizer=tokenizer, is_language_sensitive=is_language_sensitive, cache_dir=model_args.cache_dir
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        SquadDataset(
            data_args,
            tokenizer=tokenizer,
            mode="dev",
            is_language_sensitive=is_language_sensitive,
            cache_dir=model_args.cache_dir,
        )
        if training_args.do_eval
        else None
    )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
""" Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."}) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
day14.rs
//! Link: https://adventofcode.com/2019/day/14
//! Day 14: Space Stoichiometry
//!
//! As you approach the rings of Saturn, your ship's low fuel indicator turns on.
//! There isn't any fuel here, but the rings have plenty of raw material.
//! Perhaps your ship's Inter-Stellar Refinery Union brand nanofactory
//! can turn these raw materials into fuel.
//!
//! You ask the nanofactory to produce a list of the reactions
//! it can perform that are relevant to this process (your puzzle input).
//! Every reaction turns some quantities of specific input chemicals into
//! some quantity of an output chemical. Almost every chemical is
//! produced by exactly one reaction; the only exception,
//! ORE, is the raw material input to the entire process and is not produced by a reaction.
//!
//! You just need to know how much ORE you'll need to collect
//! before you can produce one unit of FUEL.

use std::collections::HashMap;

struct Formulas(HashMap<String, (usize, Vec<(usize, String)>)>);

// Each reaction gives specific quantities for its inputs and output;
// reactions cannot be partially run, so only whole integer
// multiples of these quantities can be used.
// (It's okay to have leftover chemicals when you're done, though.)
#[aoc_generator(day14)]
fn input_generator(input: &str) -> Formulas
impl Formulas {
    pub fn resolve(&self, item: String, count: usize) -> usize {
        self.resolve_recursive(&item, count, &mut HashMap::new())
    }

    fn resolve_recursive(&self, item: &String, mut count: usize, extras: &mut HashMap<String, usize>) -> usize {
        if item == "ORE" {
            return count;
        }
        if let Some(n) = extras.remove(item) {
            use std::cmp::Ordering::{Greater, Equal, Less};
            match count.cmp(&n) {
                Greater => count -= n,
                Equal => return 0,
                Less => {
                    extras.insert(item.clone(), n - count);
                    return 0;
                }
            }
        }
        let (reaction_count, reactors) = &self.0[item];
        let needed_reactions = match count % reaction_count {
            0 => count / reaction_count,
            x => {
                *extras.entry(item.clone()).or_default() += reaction_count - x;
                count / reaction_count + 1
            }
        };
        reactors.iter().map(|(reactor_count, reactor)| {
            self.resolve_recursive(reactor, needed_reactions * reactor_count, extras)
        }).sum::<usize>()
    }
}

// Given the list of reactions in your puzzle input,
// what is the minimum amount of ORE required to produce exactly 1 FUEL?
//
// Your puzzle answer was 857266.
#[aoc(day14, part1, Recursion)]
fn solve_part1_recursion(f: &Formulas) -> usize {
    f.resolve("FUEL".to_owned(), 1)
}

// After collecting ORE for a while, you check your cargo hold: 1 trillion (1000000000000) units of ORE.
// Given 1 trillion ORE, what is the maximum amount of FUEL you can produce?
//
// Your puzzle answer was 2144702.
#[aoc(day14, part2, Recursion)]
fn solve_part2_recursion(f: &Formulas) -> usize {
    let max_ore = f.resolve("FUEL".to_owned(), 1_000_000_000);
    let ratio = max_ore as f64 / 1_000_000_000.0;
    let mut guess = (1e12 / ratio) as usize + 1;
    while f.resolve("FUEL".to_owned(), guess) < 10_usize.pow(12) {
        guess += 2;
    }
    while f.resolve("FUEL".to_owned(), guess) > 10_usize.pow(12) {
        guess -= 1;
    }
    guess
}
{
    Formulas(input
        .trim()
        .lines()
        .map(|l| {
            let sides: Vec<_> = l.split(" => ").collect();
            let ingr = sides[0].split(',').map(|pair| {
                let pair: Vec<_> = pair.split_whitespace().collect();
                let num = pair[0].parse().unwrap();
                let ident = pair[1];
                (num, ident.to_owned())
            }).collect();
            let result: Vec<_> = sides[1].split_whitespace().collect();
            let num = result[0].parse().unwrap();
            let ident = result[1];
            (ident.to_owned(), (num, ingr))
        }).collect())
}
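Part 2 above extrapolates a fuel guess from the average per-unit ore cost and then adjusts it linearly. Since the ore cost is monotonically non-decreasing in the amount of fuel requested (leftovers only ever help), a binary search over fuel gives the same answer with a logarithmic number of resolve calls; a Python sketch against a resolve-style oracle (ore_for stands in for Formulas::resolve and is assumed to satisfy ore_for(1) <= budget):

def max_fuel(ore_for, budget: int = 10**12) -> int:
    # ore_for(fuel) -> ore required; monotonically non-decreasing in fuel.
    lo, hi = 1, 2
    while ore_for(hi) <= budget:  # exponential search for an infeasible upper bound
        hi *= 2
    # Invariant: ore_for(lo) <= budget < ore_for(hi)
    while lo < hi:
        mid = (lo + hi + 1) // 2
        if ore_for(mid) <= budget:
            lo = mid
        else:
            hi = mid - 1
    return lo  # largest fuel amount whose ore cost fits the budget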
category.controller.ts
import { Body, Controller, Get, Post, UseGuards } from '@nestjs/common';
import { ApiBearerAuth, ApiResponse } from '@nestjs/swagger';
import { JwtAuthGuard } from '../auth/jwt-auth.guard';
import CategoryService from './category.service';
import CreateCategoryDto from '../dto/create-category.dto';

@ApiBearerAuth()
@UseGuards(JwtAuthGuard)
@Controller('category')
export default class
{
  constructor(private readonly categoryService: CategoryService) {}

  @ApiBearerAuth()
  @UseGuards(JwtAuthGuard)
  @ApiResponse({ status: 200, description: 'Creates category successfully' })
  @Post()
  postCategory(@Body() category: CreateCategoryDto) {
    return this.categoryService.insert(category);
  }

  @ApiBearerAuth()
  @UseGuards(JwtAuthGuard)
  @ApiResponse({
    status: 200,
    description: 'Returns all categories successfully',
  })
  @Get()
  getAll() {
    return this.categoryService.getAll();
  }
}
CategoryController
schrutepy.py
import pandas
""" full_path = "https://github.com/bradlindblad/schrutepy/raw/master/data/schrute.csv" df = pandas.read_csv(full_path) df = df.drop("Unnamed: 0", axis=1) return df
def load_schrute():
    """
    The entire script transcriptions from The Office in pandas dataframe format.
auth.routes.js
const express = require("express");
const UserSignIn = require("../controllers/auth/SignIn");
const UserSignUp = require("../controllers/auth/SignUp");
const { validateSignUp, validateSignIn } = require("../middleware/checkReq");

/**
 * @description Routes for user authentication.
 * @author Harrsh Patel <[email protected]>
 * @route /todo/api/auth/*
 */
const AuthRoute = express.Router();

AuthRoute.post("/signin", validateSignIn, UserSignIn);
AuthRoute.post("/signup", validateSignUp, UserSignUp);

module.exports = AuthRoute;
soln.py
# This solution may be wrong
class
:
    def minNumberOfSemesters(self, n: int, dependencies: List[List[int]], k: int) -> int:
        graph = [[] for _ in range(n)]
        memo_indegrees = [0] * n
        for u, v in dependencies:
            u -= 1
            v -= 1
            memo_indegrees[v] += 1
            graph[u].append(v)

        levels = []
        indegrees = memo_indegrees[:]
        frees = {i for i in range(n) if not indegrees[i]}
        node_levels = {}
        level = 0
        while frees:
            nxt_level = set()
            levels.append(frees)
            for u in frees:
                node_levels[u] = level
                for v in graph[u]:
                    indegrees[v] -= 1
                    if indegrees[v] == 0:
                        nxt_level.add(v)
            frees = nxt_level
            level += 1
        levels.append(set())
        # print(levels)
        # print(node_levels)

        frees = [(0, -sum(v in levels[1] for v in graph[i]), i) for i in levels[0]]
        heapq.heapify(frees)
        ans = 0
        indegrees = memo_indegrees[:]
        while frees:
            ans += 1
            ntake = min(len(frees), k)
            for _ in range(ntake):
                level, _, u = heapq.heappop(frees)
                for v in graph[u]:
                    indegrees[v] -= 1
                    if indegrees[v] == 0:
                        level = node_levels[v]
                        heapq.heappush(frees, (level, -sum(vv in levels[level + 1] for vv in graph[v]), v))
        return ans
Solution
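The record's own opening comment, "This solution may be wrong", is warranted: ranking available courses by BFS level and next-level out-degree is a greedy heuristic, and parallel course scheduling with a per-semester cap is NP-hard in general, so counterexamples exist. Under the original constraint n <= 15, the usual exact approach is a DP over bitmasks of completed courses; a sketch (minimum_semesters is an illustrative name, not from the record):

from functools import lru_cache
from itertools import combinations
from typing import List


def minimum_semesters(n: int, dependencies: List[List[int]], k: int) -> int:
    # Assumes the prerequisite graph is a DAG (guaranteed by the problem),
    # so some course is always available while any remain.
    prereq = [0] * n
    for u, v in dependencies:
        prereq[v - 1] |= 1 << (u - 1)
    full = (1 << n) - 1

    @lru_cache(maxsize=None)
    def dp(done: int) -> int:
        if done == full:
            return 0
        # Courses not yet taken whose prerequisites are all in `done`.
        avail = [i for i in range(n) if not done >> i & 1 and prereq[i] & done == prereq[i]]
        if len(avail) <= k:
            # Taking everything available never hurts (standard exchange argument).
            taken = done
            for i in avail:
                taken |= 1 << i
            return 1 + dp(taken)
        # Otherwise try every way to fill the k slots this semester.
        return 1 + min(
            dp(done | sum(1 << i for i in chosen))
            for chosen in combinations(avail, k)
        )

    return dp(0)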
setup-wizard.js
jQuery( document ).ready(function() { uwp_wizard_check_plugins(); jQuery(document).on('click', '.uwp_install_plugins', function () { uwp_wizard_check_plugins(); }); }); function uwp_wizard_install_plugin( $slug,$nonce ) { var data = { 'action': 'install-plugin', '_ajax_nonce': $nonce, 'slug': $slug }; jQuery.ajax({ type: "POST", url: uwp_wizard_obj.ajaxurl, data: data, beforeSend: function() { jQuery( "."+$slug + " .uwp-plugin-status").html(jQuery('#uwp-installing-text').val()); }, success: function(data) { console.log(data); if(data.success){ jQuery( "."+$slug + " .uwp-plugin-status").html(jQuery('#uwp-installed-text').val()); jQuery( "."+$slug + " input:checkbox").removeClass('uwp_install_plugins').prop("disabled", true); uwp_wizard_check_plugins(); uwp_wizard_install_plugins($nonce); if(data.data.activateUrl){ uwp_wizard_activate_plugin(data.data.activateUrl,$slug); } }else{ alert('something went wrong'); } } }); } function uwp_wizard_activate_plugin($url,$slug){ jQuery.post($url, function(data, status){ console.log($slug+'plugin activated') }); } function uwp_wizard_install_plugins($nonce){ var $result = ''; jQuery('.uwp_install_plugins').each(function() { if(this.checked){ $result = uwp_wizard_install_plugin(this.id,$nonce); jQuery('.uwp-install-recommend').prop("disabled", true); return false; } }); } function uwp_wizard_check_plugins(){ var $install = ''; jQuery('.uwp_install_plugins').each(function() { $install += this.checked ? "1," : ""; }); if($install){ jQuery('.uwp-install-recommend').show();
}else{ jQuery('.uwp-install-recommend').hide(); jQuery('.uwp-continue-recommend').show(); } } function uwp_wizard_setup_menu($security){ var $menu_id = jQuery( "#uwp_wizard_menu_id" ).val(); var $menu_location = jQuery( "#uwp_wizard_menu_location" ).val(); var data = { 'action': 'uwp_wizard_setup_menu', 'security': $security, 'menu_id': $menu_id, 'menu_location': $menu_location }; jQuery.ajax({ type: "POST", url: uwp_wizard_obj.ajaxurl, data: data, beforeSend: function() { jQuery( ".uwp-wizard-menu-result" ).html('<i class="fas fa-sync fa-spin" style="font-size:18px"></i>'); }, success: function(data) { if(data.data){ jQuery( ".uwp-wizard-menu-result" ).text(data.data); } } }); return false; } function uwp_wizard_setup_dummy_users($security, type) { jQuery('.uwp_dummy_users_button').hide(); jQuery("#uwp_diagnose_add_dummy_users,#uwp_diagnose_remove_dummy_users").html(''); jQuery("#uwp_diagnose_pb_" + type).find('.progressBar').show().progressbar({value: 0}); uwp_wizard_process_diagnose_step( 0, type,$security ); } function uwp_wizard_process_diagnose_step(step, type, security) { jQuery.ajax({ url: uwp_wizard_obj.ajaxurl, type: 'POST', dataType: 'json', data: { action: 'uwp_process_diagnosis', step: step, type: type, security: security, }, beforeSend: function() {}, success: function(response, textStatus, xhr) { if(response.done === true || response.error === true ) { tools_progress(response.percent, type); setTimeout(function(){ jQuery("#uwp_diagnose_pb_" + type).find('.progressBar').hide(); jQuery("#uwp_diagnose_" + type).html(response.message); if( 'add_dummy_users' === type ) { jQuery('.uwp_remove_dummy_users_button').show(); jQuery('.uwp_add_dummy_users_button').hide(); } else{ jQuery('.uwp_add_dummy_users_button').show(); jQuery('.uwp_remove_dummy_users_button').hide(); } }, 1500); } else { setTimeout(function(){ tools_progress(response.percent, type); uwp_wizard_process_diagnose_step(parseInt( response.step ), type,security) }, 500); } }, error: function(xhr, textStatus, errorThrown) { alert(textStatus); } }); } function tools_progress(percent, type) { $element = jQuery("#uwp_diagnose_pb_" + type).find('.progressBar'); var progressBarWidth = percent * $element.width() / 100; $element.find('div').animate({ width: progressBarWidth }, 500).html(percent + "% "); }
jQuery('.uwp-continue-recommend').hide();
core.rs
use crate::lightray_torch::errors::InternalTorchError; use crate::lightray_torch::tensor::read_npy; use base64; use serde::{Deserialize, Serialize}; use std::convert::TryFrom; use tch::IValue; #[derive(Serialize, Deserialize, Debug, PartialEq)] pub enum SerializableIValue { None, Bool(bool), Int(i64), Double(f64), Str(String), Tuple(Vec<SerializableIValue>), List(Vec<SerializableIValue>), Optional(Option<Box<SerializableIValue>>), TensorNPYBase64(String), } impl TryFrom<&IValue> for SerializableIValue { type Error = String; fn try_from(value: &IValue) -> Result<Self, Self::Error> { match value { IValue::None => Ok(SerializableIValue::None), IValue::Bool(bool_value) => Ok(SerializableIValue::Bool(*bool_value)), IValue::Int(int_value) => Ok(SerializableIValue::Int(*int_value)), IValue::Double(double_value) => Ok(SerializableIValue::Double(*double_value)), IValue::String(string_value) => Ok(SerializableIValue::Str(string_value.clone())), IValue::Tuple(tuple_value) => Ok(SerializableIValue::Tuple( tuple_value .iter() .map(SerializableIValue::try_from) .collect::<Result<Vec<SerializableIValue>, String>>()?, )), IValue::GenericList(tuple_value) => Ok(SerializableIValue::List( tuple_value .iter() .map(SerializableIValue::try_from) .collect::<Result<Vec<SerializableIValue>, String>>()?, )), IValue::DoubleList(doubles_value) => Ok(SerializableIValue::List( doubles_value .iter() .map(|x| SerializableIValue::Double(*x)) .collect(), )), IValue::IntList(ints_value) => Ok(SerializableIValue::List( ints_value .iter() .map(|x| SerializableIValue::Int(*x)) .collect(), )), IValue::BoolList(ints_value) => Ok(SerializableIValue::List( ints_value .iter() .map(|x| SerializableIValue::Bool(*x)) .collect(), )), _ => unimplemented!(), } } } impl TryFrom<&SerializableIValue> for IValue { type Error = String; fn try_from(value: &SerializableIValue) -> Result<Self, Self::Error> { match value { SerializableIValue::None => Ok(IValue::None), SerializableIValue::Bool(bool_value) => Ok(IValue::Bool(*bool_value)), SerializableIValue::Int(int_value) => Ok(IValue::Int(*int_value)), SerializableIValue::Double(double_value) => Ok(IValue::Double(*double_value)), SerializableIValue::Str(string_value) => Ok(IValue::String(string_value.clone())), SerializableIValue::Tuple(tuple_value) => Ok(IValue::Tuple( tuple_value .iter() .map(IValue::try_from) .collect::<Result<Vec<IValue>, String>>()?, )), SerializableIValue::List(list_value) => Ok(IValue::GenericList( list_value .iter() .map(IValue::try_from) .collect::<Result<Vec<IValue>, String>>()?, )), SerializableIValue::Optional(optional) => match &optional { Option::None => Ok(IValue::None), Option::Some(x) => Ok(IValue::try_from(&**x)?), }, SerializableIValue::TensorNPYBase64(x) => match &base64::decode(&x) { Result::Ok(byte_array) => Ok(IValue::Tensor(read_npy(byte_array)?)), Result::Err(y) => Err(y.to_string()), }, } } } #[derive(Serialize, Deserialize, Debug)] pub struct TorchScriptInput { pub positional_arguments: Vec<SerializableIValue>, } impl PartialEq for TorchScriptInput { fn eq(&self, other: &TorchScriptInput) -> bool
} pub struct TorchScriptGraph { pub batchable: bool, pub module: tch::CModule, } impl TorchScriptGraph { pub fn forward( &self, inputs: &TorchScriptInput, ) -> Result<SerializableIValue, InternalTorchError> { let model_inputs: Vec<IValue> = inputs .positional_arguments .iter() .map(IValue::try_from) .collect::<Result<Vec<IValue>, String>>()?; let model_output = self.module.forward_is(&model_inputs); match model_output { Result::Ok(true_model_output) => Ok(SerializableIValue::try_from(&true_model_output)?), Result::Err(error) => Err(InternalTorchError { internal_error: error.to_string(), }), } } pub fn forward_batched(&self) { assert!( self.batchable, r#"forward_batched can only be called on batchable TorchScriptGraph's"# ); todo!() } } #[cfg(test)] mod tests { use super::*; #[test] fn test_serialization() { let torchscript_input = TorchScriptInput { positional_arguments: vec![ SerializableIValue::List(vec![ SerializableIValue::Str("<bos>".to_string()), SerializableIValue::Str("call".to_string()), SerializableIValue::Str("mom".to_string()), SerializableIValue::Str("<eos>".to_string()), ]), SerializableIValue::Bool(true), SerializableIValue::Int(3), SerializableIValue::Int(3), ], }; let serialized = serde_json::to_string(&torchscript_input).unwrap(); let unserialized: TorchScriptInput = serde_json::from_str(&serialized).unwrap(); assert_eq!(torchscript_input, unserialized) } }
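The `TensorNPYBase64` variant is the only bridge between the JSON wire format and real tensors. A sketch of the caller side, assuming `npy_bytes` already holds a valid NPY serialization: the tensor crosses the JSON boundary as a base64 string and only becomes a `tch::Tensor` inside the `TryFrom` conversion above.

use std::convert::TryFrom;
use tch::IValue;

// Hypothetical helper: base64-encode NPY bytes so they survive JSON transport,
// then let the TryFrom impl above decode them back into a tch::Tensor.
fn tensor_argument(npy_bytes: &[u8]) -> Result<IValue, String> {
    let wire = SerializableIValue::TensorNPYBase64(base64::encode(npy_bytes));
    IValue::try_from(&wire)
}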
lib.rs
use jni::JNIEnv; use jni::objects::{JClass, JString}; use jni::sys::{jfloat, jfloatArray, jint, jintArray}; use real_hora::core::ann_index::SerializableIndex; use std::collections::HashMap; use std::sync::Mutex; #[macro_use] extern crate lazy_static; trait ANNIndexer: real_hora::core::ann_index::ANNIndex<f32, usize> + real_hora::core::ann_index::SerializableIndex<f32, usize> { } impl ANNIndexer for real_hora::index::bruteforce_idx::BruteForceIndex<f32, usize> {} pub fn metrics_transform(s: &str) -> real_hora::core::metrics::Metric { match s { "angular" => real_hora::core::metrics::Metric::Angular, "manhattan" => real_hora::core::metrics::Metric::Manhattan, "dot_product" => real_hora::core::metrics::Metric::DotProduct, "euclidean" => real_hora::core::metrics::Metric::Euclidean, "cosine_similarity" => real_hora::core::metrics::Metric::CosineSimilarity, _ => real_hora::core::metrics::Metric::Unknown, } } lazy_static! { static ref ANN_INDEX_MANAGER: Mutex<HashMap<String, Box<dyn ANNIndexer>>> = Mutex::new(HashMap::new()); } #[no_mangle] pub extern "system" fn Java_com_hora_app_ANNIndex_new_1bf_1index( env: JNIEnv, _class: JClass, name: JString, dimension: jint, ) { let idx_name: String = env.get_string(name).unwrap().into(); let idx_dimension = dimension as usize; ANN_INDEX_MANAGER.lock().unwrap().insert( idx_name, Box::new(real_hora::index::bruteforce_idx::BruteForceIndex::< f32, usize, >::new( idx_dimension, &real_hora::index::bruteforce_params::BruteForceParams::default(), )), ); } #[no_mangle] pub extern "system" fn Java_com_hora_app_ANNIndex_add( env: JNIEnv, _class: JClass, name: JString, features: jfloatArray, features_idx: jint, ) { let idx_name: String = env.get_string(name).unwrap().into(); let idx = features_idx as usize; let length = env.get_array_length(features).unwrap() as usize; let mut buf: Vec<jfloat> = vec![0.0; length]; env.get_float_array_region(features, 0, &mut buf).unwrap(); match &mut ANN_INDEX_MANAGER.lock().unwrap().get_mut(&idx_name) { Some(index) => { let n = real_hora::core::node::Node::new_with_idx(&buf, idx); index.add_node(&n).unwrap(); } None => {} } } #[no_mangle] pub extern "system" fn Java_com_hora_app_ANNIndex_build( env: JNIEnv, _class: JClass, name: JString, mt: JString, ) { let idx_name: String = env.get_string(name).unwrap().into(); let metric: String = env.get_string(mt).unwrap().into(); match &mut ANN_INDEX_MANAGER.lock().unwrap().get_mut(&idx_name) { Some(index) => { index.build(metrics_transform(&metric)).unwrap(); } None => {} } } #[no_mangle] pub extern "system" fn Java_com_hora_app_ANNIndex_search( env: JNIEnv, _class: JClass, name: JString, k: jint, features: jfloatArray, ) -> jintArray { let idx_name: String = env.get_string(name).unwrap().into(); let length = env.get_array_length(features).unwrap() as usize; let mut buf: Vec<jfloat> = vec![0.0; length]; env.get_float_array_region(features, 0, &mut buf).unwrap(); let topk = k as usize; let mut result: Vec<i32> = Vec::new(); if let Some(index) = ANN_INDEX_MANAGER.lock().unwrap().get(&idx_name) { result = index.search(&buf, topk).iter().map(|x| *x as i32).collect(); } let output = env.new_int_array(result.len() as i32).unwrap(); env.set_int_array_region(output, 0, &result).unwrap(); output } #[no_mangle] pub extern "system" fn Java_com_hora_app_ANNIndex_load( env: JNIEnv, _class: JClass, name: JString, _file_path: JString, ) { let idx_name: String = env.get_string(name).unwrap().into(); let file_path: String = env.get_string(name).unwrap().into(); ANN_INDEX_MANAGER.lock().unwrap().insert( 
idx_name, Box::new( real_hora::index::bruteforce_idx::BruteForceIndex::<f32, usize>::load( &file_path, &real_hora::core::arguments::Args::new(), ) .unwrap(), ), ); } #[no_mangle] pub extern "system" fn Java_com_hora_app_ANNIndex_dump( env: JNIEnv, _class: JClass, name: JString, _file_path: JString, ) { let idx_name: String = env.get_string(name).unwrap().into();
    let file_path: String = env.get_string(name).unwrap().into();
if let Some(index) = ANN_INDEX_MANAGER.lock().unwrap().get_mut(&idx_name) { index .dump(&file_path, &real_hora::core::arguments::Args::new()) .unwrap(); } }
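The exported symbol names encode the Java side of this binding (JNI unescapes `_1` to `_`, so `Java_com_hora_app_ANNIndex_new_1bf_1index` maps to `com.hora.app.ANNIndex.new_bf_index`). A sketch of the class those symbols imply; the native library name is an assumption.

package com.hora.app;

public class ANNIndex {
    static {
        System.loadLibrary("hora"); // library name assumed
    }

    // One static native per exported JNI symbol; JClass in the Rust
    // signatures means these are static, not instance, methods.
    public static native void new_bf_index(String name, int dimension);
    public static native void add(String name, float[] features, int featuresIdx);
    public static native void build(String name, String metric);
    public static native int[] search(String name, int k, float[] features);
    public static native void load(String name, String filePath);
    public static native void dump(String name, String filePath);
}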
TShirt.tsx
import React, { forwardRef } from "react"; import { IconWeight, IconProps, PaintFunction, renderPathForWeight, } from "../lib"; import IconBase, { RenderFunction } from "../lib/IconBase"; const pathsByWeight = new Map<IconWeight, PaintFunction>(); pathsByWeight.set("bold", (color: string) => ( <> <path d="M192,120h26.66667a8,8,0,0,0,7.38461-4.92308l15.32021-36.76848a8,8,0,0,0-3.19184-9.89019L192,40" fill="none" stroke={color} strokeLinecap="round" strokeLinejoin="round" strokeWidth="24" /> <path d="M64,120H37.33333a8,8,0,0,1-7.38461-4.92308L14.62851,78.30844a8,8,0,0,1,3.19184-9.89019L64,40" fill="none" stroke={color} strokeLinecap="round" strokeLinejoin="round" strokeWidth="24" /> <path d="M160,40a32,32,0,0,1-64,0H64V208a8,8,0,0,0,8,8H184a8,8,0,0,0,8-8V40Z" fill="none" stroke={color} strokeLinecap="round" strokeLinejoin="round" strokeWidth="24" /> </> )); pathsByWeight.set("duotone", (color: string) => ( <> <path d="M192,120h27.05573a8,8,0,0,0,7.15542-4.42229l18.40439-36.80878a8,8,0,0,0-3.18631-10.52366L192,40" fill="none" stroke={color} strokeLinecap="round" strokeLinejoin="round" strokeWidth="16" /> <path d="M64,120H36.94427a8,8,0,0,1-7.15542-4.42229L11.38446,78.76893a8,8,0,0,1,3.18631-10.52366L64,40" fill="none" stroke={color} strokeLinecap="round" strokeLinejoin="round" strokeWidth="16" /> <path d="M192,120h27.05573a8,8,0,0,0,7.15542-4.42229l18.40439-36.80878a8,8,0,0,0-3.18631-10.52366L192,40Z" opacity="0.2" /> <path d="M64,120H36.94427a8,8,0,0,1-7.15542-4.42229L11.38446,78.76893a8,8,0,0,1,3.18631-10.52366L64,40Z" opacity="0.2" /> <path d="M160,40a32,32,0,0,1-64,0H64V208a8,8,0,0,0,8,8H184a8,8,0,0,0,8-8V40Z" fill="none" stroke={color} strokeLinecap="round" strokeLinejoin="round" strokeWidth="16" /> </> )); pathsByWeight.set("fill", () => ( <> <path d="M245.39844,61.29932,195.96924,33.0542c-.01953-.01123-.04-.019-.05957-.03027-.15833-.08887-.32215-.16846-.48694-.24659-.07752-.03686-.15393-.07763-.23206-.11181-.14917-.06494-.30322-.12012-.45691-.176-.09912-.03638-.19726-.07666-.29724-.10888-.11938-.03809-.24243-.06714-.36425-.09986-.13868-.03735-.27662-.07739-.41614-.10693-.08631-.01807-.17493-.0293-.26209-.04468-.1781-.03149-.35583-.06226-.53442-.08154-.07031-.00757-.142-.00928-.21265-.0149-.19653-.01586-.39282-.02856-.58911-.02978-.01953,0-.03833-.00293-.05786-.00293H160a7.99977,7.99977,0,0,0-8,8,24,24,0,0,1-48,0,7.99977,7.99977,0,0,0-8-8H64c-.02576,0-.05054.00366-.07629.00391-.16809.00146-.33606.01318-.50428.02539-.09814.00708-.19726.01025-.29443.02075-.15137.0166-.302.0437-.453.06909-.11365.019-.2284.03394-.34046.05786-.11706.0249-.23291.05933-.34936.08985-.14331.03735-.28736.072-.42749.11694-.08545.02759-.16931.0625-.25415.09326-.16724.05982-.334.12012-.49573.19067-.07251.03174-.14343.06983-.21545.10376-.16907.08008-.33692.16163-.49927.25269-.01953.011-.0398.0188-.05933.03L10.60107,61.29932A16.00794,16.00794,0,0,0,4.229,82.34668l18.4043,36.80811A15.91231,15.91231,0,0,0,36.94434,128H56v80a16.01833,16.01833,0,0,0,16,16H184a16.01833,16.01833,0,0,0,16-16V128h19.05566a15.91146,15.91146,0,0,0,14.31055-8.84473l18.40527-36.80908A16.00784,16.00784,0,0,0,245.39844,61.29932ZM36.94434,112,18.54,75.19092,56,53.7854V112Zm182.11132,0H200V53.7854l37.46045,21.406Z" /> </> )); pathsByWeight.set("light", (color: string) => ( <> <path d="M192,120h27.05573a8,8,0,0,0,7.15542-4.42229l18.40439-36.80878a8,8,0,0,0-3.18631-10.52366L192,40" fill="none" stroke={color} strokeLinecap="round" strokeLinejoin="round" strokeWidth="12" /> <path 
d="M64,120H36.94427a8,8,0,0,1-7.15542-4.42229L11.38446,78.76893a8,8,0,0,1,3.18631-10.52366L64,40" fill="none" stroke={color} strokeLinecap="round" strokeLinejoin="round" strokeWidth="12" /> <path d="M160,40a32,32,0,0,1-64,0H64V208a8,8,0,0,0,8,8H184a8,8,0,0,0,8-8V40Z" fill="none" stroke={color} strokeLinecap="round" strokeLinejoin="round" strokeWidth="12" /> </> )); pathsByWeight.set("thin", (color: string) => ( <> <path d="M192,120h27.05573a8,8,0,0,0,7.15542-4.42229l18.40439-36.80878a8,8,0,0,0-3.18631-10.52366L192,40" fill="none" stroke={color} strokeLinecap="round" strokeLinejoin="round" strokeWidth="8" /> <path d="M64,120H36.94427a8,8,0,0,1-7.15542-4.42229L11.38446,78.76893a8,8,0,0,1,3.18631-10.52366L64,40" fill="none" stroke={color} strokeLinecap="round" strokeLinejoin="round" strokeWidth="8" /> <path d="M160,40a32,32,0,0,1-64,0H64V208a8,8,0,0,0,8,8H184a8,8,0,0,0,8-8V40Z" fill="none" stroke={color} strokeLinecap="round" strokeLinejoin="round" strokeWidth="8" /> </> )); pathsByWeight.set("regular", (color: string) => ( <> <path d="M192,120h27.05573a8,8,0,0,0,7.15542-4.42229l18.40439-36.80878a8,8,0,0,0-3.18631-10.52366L192,40" fill="none" stroke={color} strokeLinecap="round" strokeLinejoin="round" strokeWidth="16" /> <path d="M64,120H36.94427a8,8,0,0,1-7.15542-4.42229L11.38446,78.76893a8,8,0,0,1,3.18631-10.52366L64,40" fill="none" stroke={color} strokeLinecap="round" strokeLinejoin="round" strokeWidth="16" /> <path d="M160,40a32,32,0,0,1-64,0H64V208a8,8,0,0,0,8,8H184a8,8,0,0,0,8-8V40Z" fill="none" stroke={color} strokeLinecap="round" strokeLinejoin="round" strokeWidth="16" /> </> )); const renderPath: RenderFunction = (weight: IconWeight, color: string) => renderPathForWeight(weight, color, pathsByWeight); const TShirt = forwardRef<SVGSVGElement, IconProps>((props, ref) => ( <IconBase ref={ref} {...props} renderPath={renderPath} /> )); TShirt.displayName = "TShirt";
export default TShirt;
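A minimal usage sketch; the `size`, `color`, and `weight` props come from the shared `IconProps` in `../lib` and are assumptions here, not verified against that module.

import React from "react";
import TShirt from "./TShirt";

// Renders the duotone variant at 32px in teal.
export const Example = () => <TShirt size={32} color="teal" weight="duotone" />;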
ThresholdingOutputStream.rs
org.apache.commons.io.output.ThresholdingOutputStream
index_delete.go
// Copyright (c) 2014 Couchbase, Inc. // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file // except in compliance with the License. You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // Unless required by applicable law or agreed to in writing, software distributed under the // License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, // either express or implied. See the License for the specific language governing permissions // and limitations under the License. package http import ( "fmt" "net/http" "os" ) type DeleteIndexHandler struct { basePath string IndexNameLookup varLookupFunc } func
NewDeleteIndexHandler(basePath string) *DeleteIndexHandler {
	return &DeleteIndexHandler{
		basePath: basePath,
	}
}

func (h *DeleteIndexHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
	// find the name of the index to delete
	var indexName string
	if h.IndexNameLookup != nil {
		indexName = h.IndexNameLookup(req)
	}
	if indexName == "" {
		showError(w, req, "index name is required", 400)
		return
	}

	indexToDelete := UnregisterIndexByName(indexName)
	if indexToDelete == nil {
		showError(w, req, fmt.Sprintf("no such index '%s'", indexName), 404)
		return
	}

	// close the index
	indexToDelete.Close()

	// now delete it
	err := os.RemoveAll(h.indexPath(indexName))
	if err != nil {
		showError(w, req, fmt.Sprintf("error deleting index: %v", err), 500)
		return
	}

	rv := struct {
		Status string `json:"status"`
	}{
		Status: "ok",
	}
	mustEncode(w, rv)
}

func (h *DeleteIndexHandler) indexPath(name string) string {
	return h.basePath + string(os.PathSeparator) + name
}
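A minimal wiring sketch, assuming `varLookupFunc` is the usual `func(req *http.Request) string` and that this lives in the same package as the handler; the route and query-parameter name are made up here for illustration.

package http

import "net/http"

func newDeleteRouter(basePath string) *http.ServeMux {
	h := NewDeleteIndexHandler(basePath)
	h.IndexNameLookup = func(req *http.Request) string {
		return req.URL.Query().Get("index") // hypothetical lookup strategy
	}
	mux := http.NewServeMux()
	mux.Handle("/api/index/delete", h)
	return mux
}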
portal.js
function module_initPORTAL() { patientFile_loadScreen({portal:true}); $(document).attr('title', app_practiceClientProperties['portal.app.title']); app_viewStack('signin-screen', DO_SCROLL); $('#portal-welcome-screen-image').attr('src', 'assets/images/practice/'+PRACTICE+'/'+app_practiceClientProperties['portal.app.main_image']); $('.portal.practice-logo').attr('src', 'assets/images/practice/'+PRACTICE+'/'+app_practiceClientProperties['portal.app.practice_logo_sm']); $('#app-signin-submit').click(function(){ app_login($(this).data('client-type')); }); $('#app-change-patient').click(function() { portal_patientSearchDialog(); }); $('#app-retrieve-credentials').click(function(){ app_showCredentialsRecoveryDialog(); }); $('#dashboard-screen-btn').click(function(){portal_dashboardScreen()}); $('#app-intake-forms-panel-btn').click(function(){portal_intakeFormsScreen()}); $('#app-family-records-panel-btn').click(function(){portal_rxRenewalScreen()}); $('#app-letters-panel-btn').click(function(){portal_lettersScreen()}); $('#app-messages-panel-btn').click(function(){portal_messagesScreen()}); $('#app-files-panel-btn').click(function(){portal_filesScreen()}); $('#app-forms-panel-btn').click(function(){portal_formsScreen()}); $('#app-payments-panel-btn').click(function(){portal_paymentsScreen()}); $('#app-appointments-panel-btn').click(function(){portal_appointmentsScreen()}); $('#app-send-message-panel-btn').click(function(){portal_sendMessageScreen()}); $('#app-settings-panel-btn').click(function(){portal_settingsScreen()}); $('#app-logout-submit').click(function(e){e.preventDefault();app_logout();}); $('#send-message-submit').click(function(e){portal_sendMessageToProvider();}); $('#app-rx-request-btn').click(function(e){portal_rxRequest_clearForm();}); $('#rx-request-submit').click(function(e){portal_rxRequest();}); $('#appt-request-from').datepicker(); $('#appt-request-to').datepicker(); var fromOffice = $.QueryString["fromOffice"]; if (fromOffice == "true") { var tempSessionId = $.QueryString["tempSessionId"]; app_module = $.QueryString["module"]; portal_validateFromOffice(tempSessionId); } var activateUser = $.QueryString["activateUser"]; if (activateUser == "true") { var activationCode = $.QueryString["activationCode"]; app_logout(function(){ portal_validateViaActivation(activationCode); }) } var passwordRecovery = $.QueryString["passwordRecovery"]; if (passwordRecovery == "true") { var recoveryCode = $.QueryString["recoveryCode"]; portal_validateViaRecovery(recoveryCode); } setupAppointmentScreen(); } function portal_patientSearchDialog(callback) { $(".modal-backdrop").remove(); var args = { title: "Patient Select", list: app_patients } RenderUtil.render('dialog/pot/guardian_patient_select', args, function(s) { $('#modal-patient-select').remove(); var $template = $(s); $('#modals-placement').html($template); $('#modal-patient-select').modal('show'); $template.find('#guardian-patients').on('change', function(){ app_patientId = $(this).val(); $('#modal-patient-select').modal('hide'); portal_getPatient(); }) });
}

function portal_getGuardianPatients(callback, filter={}) {
var jsonData = _.extend({ module:app_module, id: app_guardian.id, sessionId: app_getSessionId() }, filter); app_post("patient/getGuardianPatients", jsonData, function(parsedData){ callback(parsedData); }) } function portal_appointmentsScreen() { app_viewStack('appointments-screen', DO_SCROLL); app_loadCalendar(); } function portal_buildFormControls() { portal_getPatientClinicians(); } function portal_showNoPatientsDialog(){ dialog({ modalTitle:"No clients", modalBodyText:"You do not have any clients.", okButton:"Ok" }); } function portal_setClientInfo() { app_patientFullName = util_buildFullName(app_patient.firstName, app_patient.middleName, app_patient.lastName); $('#dashboard-patient-full-name').html(app_patientFullName); app_patientProfileImage = app_getPatientProfileImagePath(); $('#dashboard-patient-profile-photo').attr('src', app_patientProfileImage); var jsonData = { id: app_patient.id, sessionId: app_getSessionId() }; app_post("patient/getClientInfo", jsonData, function(parsedData) { var recentActivity = parsedData.recentActivity; RenderUtil.render('portal/dashboard/recent_activity', {clientId: app_patient.id, recentActivity: recentActivity}, function(s) { $("#dashboard-recent-activity").html(s); }); var personalInfo = parsedData.personalInfo; RenderUtil.render('portal/dashboard/your_information', {personalInfo: personalInfo}, function(s) { $("#dashboard-your-information").html(s); }); }) } function portal_dashboardScreen() { app_viewStack('dashboard-screen', DO_SCROLL); portal_setClientInfo(); } function portal_filesScreen() { patientFile_viewPatientFiles(); } function portal_formsScreen() { app_viewStack('portal-forms-screen', DO_SCROLL); app_getPatientForms(); } function portal_getAppLists() { var jsonData = JSON.stringify({ sessionId: app_getSessionId(), clientType:app_clientType}); $.post("app/getAppLists", {data:jsonData}, function(data) { var parsedData = $.parseJSON(data); if (!util_checkSessionResponse(parsedData)) return false; var appLists = parsedData.appLists; app_patientClinicians = appLists.patientClinicians; }); } function portal_getPatientClinicians() { var jsonData = JSON.stringify({ id: app_client.id, sessionId: app_getSessionId() }); $.post("patient/getPatientClinicians", {data:jsonData}, function(data) { var parsedData = $.parseJSON(data); if (!util_checkSessionResponse(parsedData)) return false; app_patientClinicians = parsedData.list; RenderUtil.render('component/patient_clinician_select_options', {options:app_patientClinicians}, function(s) { $(".app-patient-clinicians-select").html(s); }); }); } function portal_intakeFormsScreen() { app_renderPatientIntakeScreen(); app_viewStack('intake-forms-screen', DO_SCROLL); } function portal_lettersScreen() { app_viewStack('letters-screen', DO_SCROLL); portal_getPatientLetters(); } function portal_messagesScreen() { app_viewStack('messages-screen', DO_SCROLL); portal_getPatientMessages(); } function portal_rxRenewalScreen() { app_viewStack('rx-renewal-screen', DO_SCROLL); } function portal_settingsScreen() { app_viewStack('settings-screen', DO_SCROLL); $('#patient-photo').attr('src', app_getPatientProfileImagePath()) app_patientId = app_patient.id; var sizeLimit = 1000 * 1024; app_setupPictureUpload(undefined, { sizeLimit: sizeLimit }); var $passwordForm = $('#settings-password-form'); var $password = $passwordForm.find('#password'); var $passwordConfirm = $passwordForm.find('#password_confirmation'); $('#password-form-submit').on('click', function(){ if (app_validatePasswordForm($password, $passwordConfirm)) { var 
jsonData = JSON.stringify({ sessionId: app_getSessionId(), id: app_patient.id, password: $password.val()}); app_post('patient/updatePassword', jsonData, function(parsedData) { util_clearItemError($password); if (parsedData.returnCode == RETURN_CODE_VALID) { app_displayNotification('Password successfully changed'); } else { if (parsedData.returnCode == RETURN_CODE_INVALID_PASSWORD) { util_showError($password, parsedData.errorMsg); } } }); } }) } function portal_sendMessageScreen() { app_viewStack('send-message-screen', DO_SCROLL); $('#send-message-clinician').val(''); $('#send-message-subject').val(''); $('#send-message-message').val(''); util_clearErrors(); } function portal_validateFromOffice(sessionId) { app_viewStack('signin-screen', DO_SCROLL); var jsonData = JSON.stringify({sessionId: sessionId, module:app_module }); $.post("patient/validateFromOffice", {data:jsonData}, function(data) { var parsedData = $.parseJSON(data); if (!util_checkSessionResponse(parsedData)) return false; app_client = parsedData.client; app_patient = app_client; if (app_client.authStatus == CLIENT_STATUS_AUTHORIZED) { app_clientFullName = util_buildFullName(app_client.firstName, app_client.middleName, app_client.lastName); app_notificationText = app_clientFullName + ' logged in.'; portal__setPatientApptName(); $('.home-today').html(dateFormat("fullDate")); app_runIdleTimer(); portal_dashboardScreen(); if (app_client.intakeClosed == false) { app_notificationText = app_clientFullName + ' ready for activation.'; app_renderPatientIntakeScreen(); } else { portal_buildFormControls(); } } else { if (app_client.authStatus == CLIENT_STATUS_NOT_FOUND) { app_notificationText = 'User not found in system'; } else if (app_client.authStatus == CLIENT_STATUS_INVALID_PASSWORD) { app_notificationText = 'Invalid password'; } else if (app_client.authStatus == CLIENT_STATUS_INACTIVE) { app_notificationText = 'User is inactive'; } } app_displayNotification(app_notificationText); }); } function portal_getPatient() { app_getPatient(function(){ portal_patientInit(); }); } function portal__setPatientApptName(){ var patientName=app_clientFullName; if(app_client.mrn){ patientName+= "[" + app_client.mrn + "]"; } $('.app-patient-appt-name').text(patientName); } function portal_patientInit() { app_clientFullName = util_buildFullName(app_client.firstName, app_client.middleName, app_client.lastName); portal__setPatientApptName(); if (app_patient.intakeClosed == false) { app_renderPatientIntakeScreen(); } else { portal_dashboardScreen(); app_displayNotification(app_notificationText); } portal_buildFormControls(); } function portal_onLogin() { app_runIdleTimer(); $('.home-today').html(dateFormat("fullDate")); app_clientFullName = util_buildFullName(app_client.firstName, app_client.middleName, app_client.lastName); app_notificationText = app_clientFullName + ' logged in.'; if (app_clientType == "patient") { app_patient = app_client; portal_patientInit(); } else if (app_clientType == "guardian") { portal_getGuardianPatients(function(parsedData) { app_patients = parsedData.list; var numPatients = app_patients.length $('#app-change-patient').parent().hide() if (numPatients > 1) { $('#app-change-patient').parent().show() portal_patientSearchDialog(); } else if (numPatients==1) { app_patient = app_client = app_patients[0]; portal_patientInit(); } else if(numPatients==0) { portal_showNoPatientsDialog(); } }) } } function portal_checkRenderPasswordForm(option={}, callback) { if (app_guardian) { if (app_guardian.passwordCreated != true) { 
app_renderPasswordForm(option, callback); return; } } else { if (app_client.passwordCreated != true) { app_renderPasswordForm(_.extend(option, {isPatient: true}), callback); return; } } } function portal_validateViaActivation(activationCode) { var notificationText = ''; app_viewStack('signin-screen', DO_SCROLL); var jsonData = JSON.stringify({activationCode: activationCode, module:app_module }); app_post("patient/validateViaActivation", jsonData, function(parsedData) { app_setClient(parsedData); if (app_client.authStatus == CLIENT_STATUS_AUTHORIZED) { portal_checkRenderPasswordForm({}, portal_onLogin); } else { if (app_client.authStatus == CLIENT_STATUS_ACTIVATION_CODE_EXPIRED) { app_notificationText = "Activation code expired."; } else if (app_client.authStatus == CLIENT_STATUS_ACTIVATION_CODE_ALREADY_USED) { app_notificationText = "Activation code has already been used."; } else if (app_client.authStatus == CLIENT_STATUS_PASSWORD_ALREADY_CREATED) { app_notificationText = "Please login"; } else if (app_client.authStatus == CLIENT_STATUS_NOT_FOUND) { app_notificationText = 'User not found in system'; } else if (app_client.authStatus == CLIENT_STATUS_INVALID_PASSWORD) { app_notificationText = 'Invalid password'; } else if (app_client.authStatus == CLIENT_STATUS_INACTIVE) { app_notificationText = 'User is inactive'; } app_displayNotification(app_notificationText); } }); } function portal_validateViaRecovery(recoveryCode) { var notificationText = ''; app_viewStack('signin-screen', DO_SCROLL); var jsonData = {recoveryCode: recoveryCode, module:app_module }; app_post("patient/validateViaRecovery", jsonData, function(parsedData) { app_setClient(parsedData); if (app_client.authStatus == CLIENT_STATUS_AUTHORIZED) { portal_checkRenderPasswordForm({mode:PASSWORD_RESET}, portal_onLogin); } else { if (app_client.authStatus == CLIENT_STATUS_RECOVERY_CODE_ALREADY_USED) { app_notificationText = "Recovery code has already been used."; } else if (app_client.authStatus == CLIENT_STATUS_RECOVERY_CODE_EXPIRED) { app_notificationText = "Recovery code expired. Please proceed to reset your password."; } else if (app_client.authStatus == CLIENT_STATUS_PASSWORD_ALREADY_CREATED) { app_notificationText = "Please login"; } else if (app_client.authStatus == CLIENT_STATUS_NOT_FOUND) { app_notificationText = 'User not found in system'; } else if (app_client.authStatus == CLIENT_STATUS_INVALID_PASSWORD) { app_notificationText = 'Invalid password'; } else if (app_client.authStatus == CLIENT_STATUS_INACTIVE) { app_notificationText = 'User is inactive'; } app_displayNotification(app_notificationText); } }); }
CSV.py
import re
import numpy as np
from openpnm.io.Pandas import Pandas
from openpnm.io import GenericIO, Dict
from openpnm.utils import logging, Workspace
logger = logging.getLogger(__name__)
ws = Workspace()


class CSV(GenericIO):
    r"""
    Reads and writes CSV (comma-separated-value files) containing pore and
    throat data

    Notes
    -----
    There are a few rules governing how the data is stored:

    1. The first row of the file (column headers) must contain the
    property names. The subsequent rows contain the data.

    2. The property names should be in the usual OpenPNM format, such as
    ``pore.volume`` or ``throat.surface_area``.

    3. Each column represents a specific property.  For Np x 1 or Nt x 1
    data such as *pore.volume* this is straightforward.  For Np x *m* or
    Nt x *m* data, each of the *m* columns should have their own column in
    the CSV file, with a numpy-style index indicating which axis it
    corresponds to.  For instance, the *pore.coords* values should be stored
    as three separate columns with the headings: *pore.coords[0]*,
    *pore.coords[1]*, and *pore.coords[2]*.  OpenPNM will convert that back
    into an Np x *m* array upon loading.

    4. The file can contain both or either pore and throat data.

    5. Labels can be imported by placing the characters TRUE and FALSE
    in a column corresponding to the label name (i.e. *pore.front*).  TRUE
    indicates where the label applies and FALSE otherwise.

    """

    @classmethod
    def save(cls, *args, **kwargs):
        r"""
        This method is to be deprecated.  Use ``export_data`` instead.
        """
        cls.export_data(*args, **kwargs)

    @classmethod
    def export_data(cls, network=None, phases=[], filename='', delim=' | '):
        r"""
        Save all the pore and throat property data on the Network (and
        optionally on any Phases objects) to CSV files.

        Parameters
        ----------
        network : OpenPNM Network
            The Network containing the data to be stored

        phases : list of OpenPNM Phases (optional)
            The Phases whose data should be stored.

        filename : string or path object
            The name of the file to store the data

        Notes
        -----
        The data from all Geometry objects is added to the file automatically.

        """
        project, network, phases = cls._parse_args(network=network,
                                                   phases=phases)
        df = Pandas.to_dataframe(network=network, phases=phases,
                                 join=True, delim=delim)
        # Write to file
        if filename == '':
            filename = project.name
        fname = cls._parse_filename(filename=filename, ext='csv')
        df.to_csv(fname, index=False)

    @classmethod
    def load(cls, *args, **kwargs):
        r"""
        This method will be deprecated.  Use ``import_data`` instead.
        """
        proj = cls.import_data(*args, **kwargs)
        return proj

    @classmethod
    def import_data(cls, filename, project=None, delim=' | '):
        r"""
        Opens a 'csv' file, reads in the data, and adds it to the **Network**

        Parameters
        ----------
        filename : string (optional)
            The name of the file containing the data to import.  The
            formatting of this file is outlined below.

        project : OpenPNM Project object
            A GenericNetwork is created and added to the specified Project.
            If no Project object is supplied then one will be created and
            returned.

        Returns
        -------
        project : list
            An OpenPNM project containing the data assigned to Generic
            versions of the objects from which it was exported.

        """
        from pandas import read_table

        if project is None:
            project = ws.new_project()

        fname = cls._parse_filename(filename, ext='csv')
        a = read_table(filepath_or_buffer=fname,
                       sep=',',
                       skipinitialspace=True,
                       index_col=False,
                       true_values=['T', 't', 'True', 'true', 'TRUE'],
                       false_values=['F', 'f', 'False', 'false', 'FALSE'])

        dct = {}
        # First parse through all the items and re-merge columns
        keys = sorted(list(a.keys()))
        for item in keys:
            m = re.search(r'\[.\]', item)  # The dot '.' is a wildcard
            if m:  # m is None if pattern not found, otherwise merge cols
                pname = re.split(r'\[.\]', item)[0]  # Get base propname
                # Find all other keys with same base propname
                merge_keys = [k for k in a.keys() if k.startswith(pname)]
                # Retrieve and remove arrays with same base propname
                merge_cols = [a.pop(k) for k in merge_keys]
                # Merge arrays into multi-column array and store in DataFrame
                dct[pname] = np.vstack(merge_cols).T
                # Remove key from list of keys
                for k in keys:
                    if k.startswith(pname):
                        keys.pop(keys.index(k))
            else:
                dct[item] = np.array(a.pop(item))

        project = Dict.from_dict(dct, project=project, delim=delim)
        return project
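A usage sketch of the round trip described in the class notes, assuming the usual OpenPNM entry points; 'demo' is an arbitrary filename chosen for illustration.

import openpnm as op

pn = op.network.Cubic(shape=[3, 3, 3])
op.io.CSV.export_data(network=pn, filename='demo')   # writes demo.csv
proj = op.io.CSV.import_data(filename='demo.csv')    # rebuilds a project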
auth.test.ts
import { test, expect } from "vitest" import { testenv } from "./testenv" import { v4 as uuid } from "uuid" test("sign up for Sprachy", async () => { const asRando = await testenv.asRando() const email = `twodork+${uuid()}@sprachy.com` await asRando.api.signUp({ email: email, password: "yuhyuhyuhyuh", confirmPassword: "yuhyuhyuhyuh",
    wantsReminderEmails: false
  })

  const summary = await asRando.api.getProgress()
expect(summary.user.email).toBe(email) // Try logging out await asRando.api.logout() let error: any = null try { await asRando.api.getProgress() } catch (err: any) { error = err } expect(error).not.toBe(null) // And logging in again await asRando.api.login({ email: email, password: "yuhyuhyuhyuh" }) const summary2 = await asRando.api.getProgress() expect(summary2.user.email).toBe(email) })
webpack.dev.conf.js
var config = require('../config') var webpack = require('webpack') var merge = require('webpack-merge') var utils = require('./utils') var baseWebpackConfig = require('./webpack.base.conf') var HtmlWebpackPlugin = require('html-webpack-plugin') // add hot-reload related code to entry chunks Object.keys(baseWebpackConfig.entry).forEach(function (name) { baseWebpackConfig.entry[name] = ['./client/build/dev-client'].concat(baseWebpackConfig.entry[name]) }) module.exports = merge(baseWebpackConfig, { output: { publicPath: config.dev.assetsPublicPath, }, module: { loaders: utils.styleLoaders() }, // eval-source-map is faster for development devtool: '#eval-source-map', plugins: [ new webpack.DefinePlugin({ 'process.env': config.dev.env }), // https://github.com/glenjamin/webpack-hot-middleware#installation--usage new webpack.optimize.OccurenceOrderPlugin(), new webpack.HotModuleReplacementPlugin(), new webpack.NoErrorsPlugin(), // https://github.com/ampedandwired/html-webpack-plugin new HtmlWebpackPlugin({ favicon:'./client/src/assets/img/fav.ico', //favicon路径 filename: 'index.html', template: './client/index.html',
      inject: true
    })
] })
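The `./client/build/dev-client` entry prepended above is what subscribes each bundle to the hot-middleware event stream. Its contents are not shown here, but a plausible minimal version (modeled on the common vue-cli template, so an assumption) looks like this:

// client/build/dev-client.js (assumed): subscribes every entry chunk to
// webpack-hot-middleware events and forces a full reload when needed.
require('eventsource-polyfill')
var hotClient = require('webpack-hot-middleware/client?noInfo=true&reload=true')

hotClient.subscribe(function (event) {
  if (event.action === 'reload') {
    window.location.reload()
  }
})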
SnackbarContent.js
'use strict'; Object.defineProperty(exports, "__esModule", { value: true }); exports.styles = undefined; var _extends2 = require('babel-runtime/helpers/extends'); var _extends3 = _interopRequireDefault(_extends2); var _objectWithoutProperties2 = require('babel-runtime/helpers/objectWithoutProperties'); var _objectWithoutProperties3 = _interopRequireDefault(_objectWithoutProperties2); var _defineProperty2 = require('babel-runtime/helpers/defineProperty'); var _defineProperty3 = _interopRequireDefault(_defineProperty2); var _react = require('react'); var _react2 = _interopRequireDefault(_react); var _propTypes = require('prop-types'); var _propTypes2 = _interopRequireDefault(_propTypes); var _classnames = require('classnames'); var _classnames2 = _interopRequireDefault(_classnames); var _withStyles = require('../styles/withStyles'); var _withStyles2 = _interopRequireDefault(_withStyles); var _Paper = require('../Paper'); var _Paper2 = _interopRequireDefault(_Paper); var _Typography = require('../Typography'); var _Typography2 = _interopRequireDefault(_Typography); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } // @inheritedComponent Paper var styles = exports.styles = function styles(theme) { var _root; var reverseType = theme.palette.type === 'light' ? 'dark' : 'light'; var backgroundColor = theme.palette.types[reverseType].background.default; return { root: (_root = { pointerEvents: 'initial', color: theme.palette.getContrastText(backgroundColor), backgroundColor: backgroundColor, display: 'flex', alignItems: 'center', flexWrap: 'wrap', padding: '6px ' + theme.spacing.unit * 3 + 'px' }, (0, _defineProperty3.default)(_root, theme.breakpoints.up('md'), { minWidth: 288, maxWidth: 568, borderRadius: 2 }), (0, _defineProperty3.default)(_root, theme.breakpoints.down('sm'), { flexGrow: 1 }), _root), message: { padding: theme.spacing.unit + 'px 0' }, action: { display: 'flex', alignItems: 'center', marginLeft: 'auto', paddingLeft: theme.spacing.unit * 3, marginRight: -theme.spacing.unit } }; }; function SnackbarContent(props) { var action = props.action, classes = props.classes, className = props.className, message = props.message, other = (0, _objectWithoutProperties3.default)(props, ['action', 'classes', 'className', 'message']); return _react2.default.createElement( _Paper2.default,
    (0, _extends3.default)({
component: _Typography2.default, headlineMapping: { body1: 'div' }, role: 'alertdialog', square: true, elevation: 6, className: (0, _classnames2.default)(classes.root, className) }, other), _react2.default.createElement( 'div', { className: classes.message }, message ), action ? _react2.default.createElement( 'div', { className: classes.action }, action ) : null ); } SnackbarContent.propTypes = process.env.NODE_ENV !== "production" ? { /** * The action to display. */ action: _propTypes2.default.node, /** * Useful to extend the style applied to components. */ classes: _propTypes2.default.object.isRequired, /** * @ignore */ className: _propTypes2.default.string, /** * The message to display. */ message: _propTypes2.default.node } : {}; exports.default = (0, _withStyles2.default)(styles, { name: 'MuiSnackbarContent' })(SnackbarContent);
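A minimal consumer-side sketch: `message` and `action` both accept arbitrary nodes. The import paths below assume the flat 'material-ui/...' layout this compiled build targets.

import React from 'react';
import Button from 'material-ui/Button';
import SnackbarContent from 'material-ui/SnackbarContent';

// A standalone snackbar body with an inline action button.
const saved = (
  <SnackbarContent
    message="Changes saved"
    action={<Button color="secondary" size="small">Undo</Button>}
  />
);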
nz-block-scroll-strategy.d.ts
import { ScrollStrategy } from '@angular/cdk/overlay'; import { Renderer2 } from '@angular/core'; import { NzMeasureScrollbarService } from '../../services/nz-measure-scrollbar.service'; export declare class
NzBlockScrollStrategy implements ScrollStrategy {
    private document;
    private renderer;
    private nzMeasureScrollbarService;
    constructor(document: Document, renderer: Renderer2, nzMeasureScrollbarService: NzMeasureScrollbarService);
    attach(): void;
    enable(): void;
    disable(): void;
}
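A usage sketch: anything implementing the CDK `ScrollStrategy` interface can be handed to an overlay through `OverlayConfig`. How the strategy instance itself is constructed (document, renderer, measure service) is assumed to be handled by dependency injection elsewhere.

import { Overlay, OverlayConfig } from '@angular/cdk/overlay';

// Creates an overlay whose scroll behavior is controlled by the custom strategy.
export function openBlockingOverlay(overlay: Overlay, strategy: NzBlockScrollStrategy) {
  return overlay.create(new OverlayConfig({ scrollStrategy: strategy }));
}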
annotation_key_constants.go
/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // This file should be consistent with pkg/api/v1/annotation_key_constants.go. package core const ( // ImagePolicyFailedOpenKey is added to pods created by failing open when the image policy // webhook backend fails. ImagePolicyFailedOpenKey string = "alpha.image-policy.k8s.io/failed-open" // PodPresetOptOutAnnotationKey represents the annotation key for a pod to exempt itself from pod preset manipulation PodPresetOptOutAnnotationKey string = "podpreset.admission.kubernetes.io/exclude" // MirrorPodAnnotationKey represents the annotation key set by kubelets when creating mirror pods MirrorPodAnnotationKey string = "kubernetes.io/config.mirror" // TolerationsAnnotationKey represents the key of tolerations data (json serialized) // in the Annotations of a Pod. TolerationsAnnotationKey string = "scheduler.alpha.kubernetes.io/tolerations" // TaintsAnnotationKey represents the key of taints data (json serialized) // in the Annotations of a Node. TaintsAnnotationKey string = "scheduler.alpha.kubernetes.io/taints" // SeccompPodAnnotationKey represents the key of a seccomp profile applied // to all containers of a pod. // Deprecated: set a pod security context `seccompProfile` field. SeccompPodAnnotationKey string = "seccomp.security.alpha.kubernetes.io/pod" // SeccompContainerAnnotationKeyPrefix represents the key of a seccomp profile applied // to one container of a pod. // Deprecated: set a container security context `seccompProfile` field.
	SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/"

	// SeccompProfileRuntimeDefault represents the default seccomp profile used by container runtime.
	// Deprecated: set a pod or container security context `seccompProfile` of type "RuntimeDefault" instead.
SeccompProfileRuntimeDefault string = "runtime/default" // DeprecatedSeccompProfileDockerDefault represents the default seccomp profile used by docker. // Deprecated: set a pod or container security context `seccompProfile` of type "RuntimeDefault" instead. DeprecatedSeccompProfileDockerDefault string = "docker/default" // PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized) // in the Annotations of a Node. PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods" // ObjectTTLAnnotationKey represents a suggestion for kubelet for how long it can cache // an object (e.g. secret, config map) before fetching it again from apiserver. // This annotation can be attached to node. ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl" // NonConvertibleAnnotationPrefix annotation key prefix used to identify non-convertible json paths. NonConvertibleAnnotationPrefix = "non-convertible.kubernetes.io" kubectlPrefix = "kubectl.kubernetes.io/" // LastAppliedConfigAnnotation is the annotation used to store the previous // configuration of a resource for use in a three way diff by UpdateApplyAnnotation. LastAppliedConfigAnnotation = kubectlPrefix + "last-applied-configuration" // AnnotationLoadBalancerSourceRangesKey is the key of the annotation on a service to set allowed ingress ranges on their LoadBalancers // // It should be a comma-separated list of CIDRs, e.g. `0.0.0.0/0` to // allow full access (the default) or `18.0.0.0/8,56.0.0.0/8` to allow // access only from the CIDRs currently allocated to MIT & the USPS. // // Not all cloud providers support this annotation, though AWS & GCE do. AnnotationLoadBalancerSourceRangesKey = "service.beta.kubernetes.io/load-balancer-source-ranges" // EndpointsLastChangeTriggerTime is the annotation key, set for endpoints objects, that // represents the timestamp (stored as RFC 3339 date-time string, e.g. '2018-10-22T19:32:52.1Z') // of the last change, of some Pod or Service object, that triggered the endpoints object change. // In other words, if a Pod / Service changed at time T0, that change was observed by endpoints // controller at T1, and the Endpoints object was changed at T2, the // EndpointsLastChangeTriggerTime would be set to T0. // // The "endpoints change trigger" here means any Pod or Service change that resulted in the // Endpoints object change. // // Given the definition of the "endpoints change trigger", please note that this annotation will // be set ONLY for endpoints object changes triggered by either Pod or Service change. If the // Endpoints object changes due to other reasons, this annotation won't be set (or updated if it's // already set). // // This annotation will be used to compute the in-cluster network programming latency SLI, see // https://github.com/kubernetes/community/blob/master/sig-scalability/slos/network_programming_latency.md EndpointsLastChangeTriggerTime = "endpoints.kubernetes.io/last-change-trigger-time" // MigratedPluginsAnnotationKey is the annotation key, set for CSINode objects, that is a comma-separated // list of in-tree plugins that will be serviced by the CSI backend on the Node represented by CSINode. // This annotation is used by the Attach Detach Controller to determine whether to use the in-tree or // CSI Backend for a volume plugin on a specific node. 
MigratedPluginsAnnotationKey = "storage.alpha.kubernetes.io/migrated-plugins" // PodDeletionCost can be used to set to an int32 that represent the cost of deleting // a pod compared to other pods belonging to the same ReplicaSet. Pods with lower // deletion cost are preferred to be deleted before pods with higher deletion cost. // Note that this is honored on a best-effort basis, and so it does not offer guarantees on // pod deletion order. // The implicit deletion cost for pods that don't set the annotation is 0, negative values are permitted. // // This annotation is alpha-level and is only honored when PodDeletionCost feature is enabled. PodDeletionCost = "controller.kubernetes.io/pod-deletion-cost" )
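A minimal sketch of attaching one of these keys to a Pod, with client-go's `v1.Pod` assumed as the object type: a lower pod-deletion-cost marks the replica as the preferred victim when its ReplicaSet scales down.

package main

import (
	"strconv"

	v1 "k8s.io/api/core/v1"
)

// markCheapToDelete sets the pod-deletion-cost annotation (value from this file)
// so the controller prefers deleting this pod over its higher-cost siblings.
func markCheapToDelete(pod *v1.Pod, cost int32) {
	if pod.Annotations == nil {
		pod.Annotations = map[string]string{}
	}
	pod.Annotations["controller.kubernetes.io/pod-deletion-cost"] =
		strconv.FormatInt(int64(cost), 10)
}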
0002_alter_user_options.py
# Generated by Django 3.2 on 2021-05-05 06:00 from django.db import migrations class Migration(migrations.Migration):
    dependencies = [
        ('users', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='user',
            options={'verbose_name': '用户管理', 'verbose_name_plural': '用户管理'},
        ),
    ]
coercion.rs
//! # Type Coercion
//!
//! Under certain circumstances we will coerce from one type to another,
//! for example by auto-borrowing. This occurs in situations where the
//! compiler has a firm 'expected type' that was supplied from the user,
//! and where the actual type is similar to that expected type in purpose
//! but not in representation (so actual subtyping is inappropriate).
//!
//! ## Reborrowing
//!
//! Note that if we are expecting a reference, we will *reborrow*
//! even if the argument provided was already a reference. This is
//! useful for freezing mut things (that is, when the expected type is &T
//! but you have &mut T) and also for avoiding the linearity
//! of mut things (when the expected is &mut T and you have &mut T). See
//! the various `src/test/ui/coerce/*.rs` tests for
//! examples of where this is useful.
//!
//! ## Subtle note
//!
//! When inferring the generic arguments of functions, the argument
//! order is relevant, which can lead to the following edge case:
//!
//! ```rust
//! fn foo<T>(a: T, b: T) {
//!     // ...
//! }
//!
//! foo(&7i32, &mut 7i32);
//! // This compiles, as we first infer `T` to be `&i32`,
//! // and then coerce `&mut 7i32` to `&7i32`.
//!
//! foo(&mut 7i32, &7i32);
//! // This does not compile, as we first infer `T` to be `&mut i32`
//! // and are then unable to coerce `&7i32` to `&mut i32`.
//! ```

use crate::astconv::AstConv;
use crate::check::FnCtxt;
use rustc_errors::{struct_span_err, Applicability, DiagnosticBuilder};
use rustc_hir as hir;
use rustc_hir::def_id::DefId;
use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
use rustc_infer::infer::{Coercion, InferOk, InferResult};
use rustc_infer::traits::Obligation;
use rustc_middle::lint::in_external_macro;
use rustc_middle::ty::adjustment::{
    Adjust, Adjustment, AllowTwoPhase, AutoBorrow, AutoBorrowMutability, PointerCast,
};
use rustc_middle::ty::error::TypeError;
use rustc_middle::ty::fold::TypeFoldable;
use rustc_middle::ty::relate::RelateResult;
use rustc_middle::ty::subst::SubstsRef;
use rustc_middle::ty::{self, ToPredicate, Ty, TypeAndMut};
use rustc_session::parse::feature_err;
use rustc_span::symbol::sym;
use rustc_span::{self, BytePos, Span};
use rustc_target::spec::abi::Abi;
use rustc_trait_selection::traits::error_reporting::InferCtxtExt;
use rustc_trait_selection::traits::{self, ObligationCause, ObligationCauseCode};
use smallvec::{smallvec, SmallVec};
use std::ops::Deref;

struct Coerce<'a, 'tcx> {
    fcx: &'a FnCtxt<'a, 'tcx>,
    cause: ObligationCause<'tcx>,
    use_lub: bool,
    /// Determines whether or not allow_two_phase_borrow is set on any
    /// autoref adjustments we create while coercing. We don't want to
    /// allow deref coercions to create two-phase borrows, at least initially,
    /// but we do need two-phase borrows for function argument reborrows.
    /// See #47489 and #48598
    /// See docs on the "AllowTwoPhase" type for a more detailed discussion
    allow_two_phase: AllowTwoPhase,
}

impl<'a, 'tcx> Deref for Coerce<'a, 'tcx> {
    type Target = FnCtxt<'a, 'tcx>;
    fn deref(&self) -> &Self::Target {
        &self.fcx
    }
}

type CoerceResult<'tcx> = InferResult<'tcx, (Vec<Adjustment<'tcx>>, Ty<'tcx>)>;

/// Coercing a mutable reference to an immutable works, while
/// coercing `&T` to `&mut T` should be forbidden.
fn coerce_mutbls<'tcx>(
    from_mutbl: hir::Mutability,
    to_mutbl: hir::Mutability,
) -> RelateResult<'tcx, ()> {
    match (from_mutbl, to_mutbl) {
        (hir::Mutability::Mut, hir::Mutability::Mut | hir::Mutability::Not)
        | (hir::Mutability::Not, hir::Mutability::Not) => Ok(()),
        (hir::Mutability::Not, hir::Mutability::Mut) => Err(TypeError::Mutability),
    }
}

/// Do not require any adjustments, i.e. coerce `x -> x`.
fn identity(_: Ty<'_>) -> Vec<Adjustment<'_>> {
    vec![]
}

fn simple<'tcx>(kind: Adjust<'tcx>) -> impl FnOnce(Ty<'tcx>) -> Vec<Adjustment<'tcx>> {
    move |target| vec![Adjustment { kind, target }]
}

fn success<'tcx>(
adj: Vec<Adjustment<'tcx>>, target: Ty<'tcx>, obligations: traits::PredicateObligations<'tcx>, ) -> CoerceResult<'tcx> { Ok(InferOk { value: (adj, target), obligations }) } impl<'f, 'tcx> Coerce<'f, 'tcx> { fn new( fcx: &'f FnCtxt<'f, 'tcx>, cause: ObligationCause<'tcx>, allow_two_phase: AllowTwoPhase, ) -> Self { Coerce { fcx, cause, allow_two_phase, use_lub: false } } fn unify(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> InferResult<'tcx, Ty<'tcx>> { debug!("unify(a: {:?}, b: {:?}, use_lub: {})", a, b, self.use_lub); self.commit_if_ok(|_| { if self.use_lub { self.at(&self.cause, self.fcx.param_env).lub(b, a) } else { self.at(&self.cause, self.fcx.param_env) .sup(b, a) .map(|InferOk { value: (), obligations }| InferOk { value: a, obligations }) } }) } /// Unify two types (using sub or lub) and produce a specific coercion. fn unify_and<F>(&self, a: Ty<'tcx>, b: Ty<'tcx>, f: F) -> CoerceResult<'tcx> where F: FnOnce(Ty<'tcx>) -> Vec<Adjustment<'tcx>>, { self.unify(&a, &b) .and_then(|InferOk { value: ty, obligations }| success(f(ty), ty, obligations)) } fn coerce(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> CoerceResult<'tcx> { // First, remove any resolved type variables (at the top level, at least): let a = self.shallow_resolve(a); let b = self.shallow_resolve(b); debug!("Coerce.tys({:?} => {:?})", a, b); // Just ignore error types. if a.references_error() || b.references_error() { return success(vec![], self.fcx.tcx.ty_error(), vec![]); } // Coercing from `!` to any type is allowed: if a.is_never() { return success(simple(Adjust::NeverToAny)(b), b, vec![]); } // Coercing *from* an unresolved inference variable means that // we have no information about the source type. This will always // ultimately fall back to some form of subtyping. if a.is_ty_var() { return self.coerce_from_inference_variable(a, b, identity); } // Consider coercing the subtype to a DST // // NOTE: this is wrapped in a `commit_if_ok` because it creates // a "spurious" type variable, and we don't want to have that // type variable in memory if the coercion fails. let unsize = self.commit_if_ok(|_| self.coerce_unsized(a, b)); match unsize { Ok(_) => { debug!("coerce: unsize successful"); return unsize; } Err(TypeError::ObjectUnsafeCoercion(did)) => { debug!("coerce: unsize not object safe"); return Err(TypeError::ObjectUnsafeCoercion(did)); } Err(_) => {} } debug!("coerce: unsize failed"); // Examine the supertype and consider auto-borrowing. match *b.kind() { ty::RawPtr(mt_b) => { return self.coerce_unsafe_ptr(a, b, mt_b.mutbl); } ty::Ref(r_b, _, mutbl_b) => { return self.coerce_borrowed_pointer(a, b, r_b, mutbl_b); } _ => {} } match *a.kind() { ty::FnDef(..) => { // Function items are coercible to any closure // type; function pointers are not (that would // require double indirection). // Additionally, we permit coercion of function // items to drop the unsafe qualifier. self.coerce_from_fn_item(a, b) } ty::FnPtr(a_f) => { // We permit coercion of fn pointers to drop the // unsafe qualifier. self.coerce_from_fn_pointer(a, a_f, b) } ty::Closure(closure_def_id_a, substs_a) => { // Non-capturing closures are coercible to // function pointers or unsafe function pointers. // It cannot convert closures that require unsafe. self.coerce_closure_to_fn(a, closure_def_id_a, substs_a, b) } _ => { // Otherwise, just use unification rules. self.unify_and(a, b, identity) } } } /// Coercing *from* an inference variable. 
In this case, we have no information /// about the source type, so we can't really do a true coercion and we always /// fall back to subtyping (`unify_and`). fn coerce_from_inference_variable( &self, a: Ty<'tcx>, b: Ty<'tcx>, make_adjustments: impl FnOnce(Ty<'tcx>) -> Vec<Adjustment<'tcx>>, ) -> CoerceResult<'tcx> { debug!("coerce_from_inference_variable(a={:?}, b={:?})", a, b); assert!(a.is_ty_var() && self.infcx.shallow_resolve(a) == a); assert!(self.infcx.shallow_resolve(b) == b); if b.is_ty_var() { // Two unresolved type variables: create a `Coerce` predicate. let target_ty = if self.use_lub { self.infcx.next_ty_var(TypeVariableOrigin { kind: TypeVariableOriginKind::LatticeVariable, span: self.cause.span, }) } else { b }; let mut obligations = Vec::with_capacity(2); for &source_ty in &[a, b] { if source_ty != target_ty { obligations.push(Obligation::new( self.cause.clone(), self.param_env, ty::Binder::dummy(ty::PredicateKind::Coerce(ty::CoercePredicate { a: source_ty, b: target_ty, })) .to_predicate(self.tcx()), )); } } debug!( "coerce_from_inference_variable: two inference variables, target_ty={:?}, obligations={:?}", target_ty, obligations ); let adjustments = make_adjustments(target_ty); InferResult::Ok(InferOk { value: (adjustments, target_ty), obligations }) } else { // One unresolved type variable: just apply subtyping, we may be able // to do something useful. self.unify_and(a, b, make_adjustments) } } /// Reborrows `&mut A` to `&mut B` and `&(mut) A` to `&B`. /// To match `A` with `B`, autoderef will be performed, /// calling `deref`/`deref_mut` where necessary. fn coerce_borrowed_pointer( &self, a: Ty<'tcx>, b: Ty<'tcx>, r_b: ty::Region<'tcx>, mutbl_b: hir::Mutability, ) -> CoerceResult<'tcx> { debug!("coerce_borrowed_pointer(a={:?}, b={:?})", a, b); // If we have a parameter of type `&M T_a` and the value // provided is `expr`, we will be adding an implicit borrow, // meaning that we convert `f(expr)` to `f(&M *expr)`. Therefore, // to type check, we will construct the type that `&M*expr` would // yield. let (r_a, mt_a) = match *a.kind() { ty::Ref(r_a, ty, mutbl) => { let mt_a = ty::TypeAndMut { ty, mutbl }; coerce_mutbls(mt_a.mutbl, mutbl_b)?; (r_a, mt_a) } _ => return self.unify_and(a, b, identity), }; let span = self.cause.span; let mut first_error = None; let mut r_borrow_var = None; let mut autoderef = self.autoderef(span, a); let mut found = None; for (referent_ty, autoderefs) in autoderef.by_ref() { if autoderefs == 0 { // Don't let this pass, otherwise it would cause // &T to autoref to &&T. continue; } // At this point, we have deref'd `a` to `referent_ty`. So // imagine we are coercing from `&'a mut Vec<T>` to `&'b mut [T]`. // In the autoderef loop for `&'a mut Vec<T>`, we would get // three callbacks: // // - `&'a mut Vec<T>` -- 0 derefs, just ignore it // - `Vec<T>` -- 1 deref // - `[T]` -- 2 deref // // At each point after the first callback, we want to // check to see whether this would match out target type // (`&'b mut [T]`) if we autoref'd it. We can't just // compare the referent types, though, because we still // have to consider the mutability. E.g., in the case // we've been considering, we have an `&mut` reference, so // the `T` in `[T]` needs to be unified with equality. // // Therefore, we construct reference types reflecting what // the types will be after we do the final auto-ref and // compare those. Note that this means we use the target // mutability [1], since it may be that we are coercing // from `&mut T` to `&U`. 
// // One fine point concerns the region that we use. We // choose the region such that the region of the final // type that results from `unify` will be the region we // want for the autoref: // // - if in sub mode, that means we want to use `'b` (the // region from the target reference) for both // pointers [2]. This is because sub mode (somewhat // arbitrarily) returns the subtype region. In the case // where we are coercing to a target type, we know we // want to use that target type region (`'b`) because -- // for the program to type-check -- it must be the // smaller of the two. // - One fine point. It may be surprising that we can // use `'b` without relating `'a` and `'b`. The reason // that this is ok is that what we produce is // effectively a `&'b *x` expression (if you could // annotate the region of a borrow), and regionck has // code that adds edges from the region of a borrow // (`'b`, here) into the regions in the borrowed // expression (`*x`, here). (Search for "link".) // - if in lub mode, things can get fairly complicated. The // easiest thing is just to make a fresh // region variable [4], which effectively means we defer // the decision to region inference (and regionck, which will add // some more edges to this variable). However, this can wind up // creating a crippling number of variables in some cases -- // e.g., #32278 -- so we optimize one particular case [3]. // Let me try to explain with some examples: // - The "running example" above represents the simple case, // where we have one `&` reference at the outer level and // ownership all the rest of the way down. In this case, // we want `LUB('a, 'b)` as the resulting region. // - However, if there are nested borrows, that region is // too strong. Consider a coercion from `&'a &'x Rc<T>` to // `&'b T`. In this case, `'a` is actually irrelevant. // The pointer we want is `LUB('x, 'b`). If we choose `LUB('a,'b)` // we get spurious errors (`ui/regions-lub-ref-ref-rc.rs`). // (The errors actually show up in borrowck, typically, because // this extra edge causes the region `'a` to be inferred to something // too big, which then results in borrowck errors.) // - We could track the innermost shared reference, but there is already // code in regionck that has the job of creating links between // the region of a borrow and the regions in the thing being // borrowed (here, `'a` and `'x`), and it knows how to handle // all the various cases. So instead we just make a region variable // and let regionck figure it out. let r = if !self.use_lub { r_b // [2] above } else if autoderefs == 1 { r_a // [3] above } else { if r_borrow_var.is_none() { // create var lazily, at most once let coercion = Coercion(span); let r = self.next_region_var(coercion); r_borrow_var = Some(r); // [4] above } r_borrow_var.unwrap() }; let derefd_ty_a = self.tcx.mk_ref( r, TypeAndMut { ty: referent_ty, mutbl: mutbl_b, // [1] above }, ); match self.unify(derefd_ty_a, b) { Ok(ok) => { found = Some(ok); break; } Err(err) => { if first_error.is_none() { first_error = Some(err); } } } } // Extract type or return an error. We return the first error // we got, which should be from relating the "base" type // (e.g., in example above, the failure from relating `Vec<T>` // to the target type), since that should be the least // confusing. 
let InferOk { value: ty, mut obligations } = match found { Some(d) => d, None => { let err = first_error.expect("coerce_borrowed_pointer had no error"); debug!("coerce_borrowed_pointer: failed with err = {:?}", err); return Err(err); } }; if ty == a && mt_a.mutbl == hir::Mutability::Not && autoderef.step_count() == 1 { // As a special case, if we would produce `&'a *x`, that's // a total no-op. We end up with the type `&'a T` just as // we started with. In that case, just skip it // altogether. This is just an optimization. // // Note that for `&mut`, we DO want to reborrow -- // otherwise, this would be a move, which might be an // error. For example `foo(self.x)` where `self` and // `self.x` both have `&mut `type would be a move of // `self.x`, but we auto-coerce it to `foo(&mut *self.x)`, // which is a borrow. assert_eq!(mutbl_b, hir::Mutability::Not); // can only coerce &T -> &U return success(vec![], ty, obligations); } let InferOk { value: mut adjustments, obligations: o } = self.adjust_steps_as_infer_ok(&autoderef); obligations.extend(o); obligations.extend(autoderef.into_obligations()); // Now apply the autoref. We have to extract the region out of // the final ref type we got. let r_borrow = match ty.kind() { ty::Ref(r_borrow, _, _) => r_borrow, _ => span_bug!(span, "expected a ref type, got {:?}", ty), }; let mutbl = match mutbl_b { hir::Mutability::Not => AutoBorrowMutability::Not, hir::Mutability::Mut => { AutoBorrowMutability::Mut { allow_two_phase_borrow: self.allow_two_phase } } }; adjustments.push(Adjustment { kind: Adjust::Borrow(AutoBorrow::Ref(r_borrow, mutbl)), target: ty, }); debug!("coerce_borrowed_pointer: succeeded ty={:?} adjustments={:?}", ty, adjustments); success(adjustments, ty, obligations) } // &[T; n] or &mut [T; n] -> &[T] // or &mut [T; n] -> &mut [T] // or &Concrete -> &Trait, etc. #[instrument(skip(self), level = "debug")] fn coerce_unsized(&self, mut source: Ty<'tcx>, mut target: Ty<'tcx>) -> CoerceResult<'tcx> { source = self.shallow_resolve(source); target = self.shallow_resolve(target); debug!(?source, ?target); // These 'if' statements require some explanation. // The `CoerceUnsized` trait is special - it is only // possible to write `impl CoerceUnsized<B> for A` where // A and B have 'matching' fields. This rules out the following // two types of blanket impls: // // `impl<T> CoerceUnsized<T> for SomeType` // `impl<T> CoerceUnsized<SomeType> for T` // // Both of these trigger a special `CoerceUnsized`-related error (E0376) // // We can take advantage of this fact to avoid performing unnecessary work. // If either `source` or `target` is a type variable, then any applicable impl // would need to be generic over the self-type (`impl<T> CoerceUnsized<SomeType> for T`) // or generic over the `CoerceUnsized` type parameter (`impl<T> CoerceUnsized<T> for // SomeType`). // // However, these are exactly the kinds of impls which are forbidden by // the compiler! Therefore, we can be sure that coercion will always fail // when either the source or target type is a type variable. This allows us // to skip performing any trait selection, and immediately bail out. 
if source.is_ty_var() { debug!("coerce_unsized: source is a TyVar, bailing out"); return Err(TypeError::Mismatch); } if target.is_ty_var() { debug!("coerce_unsized: target is a TyVar, bailing out"); return Err(TypeError::Mismatch); } let traits = (self.tcx.lang_items().unsize_trait(), self.tcx.lang_items().coerce_unsized_trait()); let (unsize_did, coerce_unsized_did) = if let (Some(u), Some(cu)) = traits { (u, cu) } else { debug!("missing Unsize or CoerceUnsized traits"); return Err(TypeError::Mismatch); }; // Note, we want to avoid unnecessary unsizing. We don't want to coerce to // a DST unless we have to. This currently comes out in the wash since // we can't unify [T] with U. But to properly support DST, we need to allow // that, at which point we will need extra checks on the target here. // Handle reborrows before selecting `Source: CoerceUnsized<Target>`. let reborrow = match (source.kind(), target.kind()) { (&ty::Ref(_, ty_a, mutbl_a), &ty::Ref(_, _, mutbl_b)) => { coerce_mutbls(mutbl_a, mutbl_b)?; let coercion = Coercion(self.cause.span); let r_borrow = self.next_region_var(coercion); let mutbl = match mutbl_b { hir::Mutability::Not => AutoBorrowMutability::Not, hir::Mutability::Mut => AutoBorrowMutability::Mut { // We don't allow two-phase borrows here, at least for initial // implementation. If it happens that this coercion is a function argument, // the reborrow in coerce_borrowed_ptr will pick it up. allow_two_phase_borrow: AllowTwoPhase::No, }, }; Some(( Adjustment { kind: Adjust::Deref(None), target: ty_a }, Adjustment { kind: Adjust::Borrow(AutoBorrow::Ref(r_borrow, mutbl)), target: self .tcx .mk_ref(r_borrow, ty::TypeAndMut { mutbl: mutbl_b, ty: ty_a }), }, )) } (&ty::Ref(_, ty_a, mt_a), &ty::RawPtr(ty::TypeAndMut { mutbl: mt_b, .. })) => { coerce_mutbls(mt_a, mt_b)?; Some(( Adjustment { kind: Adjust::Deref(None), target: ty_a }, Adjustment { kind: Adjust::Borrow(AutoBorrow::RawPtr(mt_b)), target: self.tcx.mk_ptr(ty::TypeAndMut { mutbl: mt_b, ty: ty_a }), }, )) } _ => None, }; let coerce_source = reborrow.as_ref().map_or(source, |&(_, ref r)| r.target); // Setup either a subtyping or a LUB relationship between // the `CoerceUnsized` target type and the expected type. // We only have the latter, so we use an inference variable // for the former and let type inference do the rest. let origin = TypeVariableOrigin { kind: TypeVariableOriginKind::MiscVariable, span: self.cause.span, }; let coerce_target = self.next_ty_var(origin); let mut coercion = self.unify_and(coerce_target, target, |target| { let unsize = Adjustment { kind: Adjust::Pointer(PointerCast::Unsize), target }; match reborrow { None => vec![unsize], Some((ref deref, ref autoref)) => vec![deref.clone(), autoref.clone(), unsize], } })?; let mut selcx = traits::SelectionContext::new(self); // Create an obligation for `Source: CoerceUnsized<Target>`. let cause = ObligationCause::new( self.cause.span, self.body_id, ObligationCauseCode::Coercion { source, target }, ); // Use a FIFO queue for this custom fulfillment procedure. // // A Vec (or SmallVec) is not a natural choice for a queue. However, // this code path is hot, and this queue usually has a max length of 1 // and almost never more than 3. By using a SmallVec we avoid an // allocation, at the (very small) cost of (occasionally) having to // shift subsequent elements down when removing the front element. 
let mut queue: SmallVec<[_; 4]> = smallvec![traits::predicate_for_trait_def( self.tcx, self.fcx.param_env, cause, coerce_unsized_did, 0, coerce_source, &[coerce_target.into()] )]; let mut has_unsized_tuple_coercion = false; let mut has_trait_upcasting_coercion = false; // Keep resolving `CoerceUnsized` and `Unsize` predicates to avoid // emitting a coercion in cases like `Foo<$1>` -> `Foo<$2>`, where // inference might unify those two inner type variables later. let traits = [coerce_unsized_did, unsize_did]; while !queue.is_empty() { let obligation = queue.remove(0); debug!("coerce_unsized resolve step: {:?}", obligation); let bound_predicate = obligation.predicate.kind(); let trait_pred = match bound_predicate.skip_binder() { ty::PredicateKind::Trait(trait_pred) if traits.contains(&trait_pred.def_id()) => { if unsize_did == trait_pred.def_id() { let self_ty = trait_pred.self_ty(); let unsize_ty = trait_pred.trait_ref.substs[1].expect_ty(); if let (ty::Dynamic(ref data_a, ..), ty::Dynamic(ref data_b, ..)) = (self_ty.kind(), unsize_ty.kind()) { if data_a.principal_def_id() != data_b.principal_def_id() { debug!("coerce_unsized: found trait upcasting coercion"); has_trait_upcasting_coercion = true; } } if let ty::Tuple(..) = unsize_ty.kind() { debug!("coerce_unsized: found unsized tuple coercion"); has_unsized_tuple_coercion = true; } } bound_predicate.rebind(trait_pred) } _ => { coercion.obligations.push(obligation); continue; } }; match selcx.select(&obligation.with(trait_pred)) { // Uncertain or unimplemented. Ok(None) => { if trait_pred.def_id() == unsize_did { let trait_pred = self.resolve_vars_if_possible(trait_pred); let self_ty = trait_pred.skip_binder().self_ty(); let unsize_ty = trait_pred.skip_binder().trait_ref.substs[1].expect_ty(); debug!("coerce_unsized: ambiguous unsize case for {:?}", trait_pred); match (&self_ty.kind(), &unsize_ty.kind()) { (ty::Infer(ty::TyVar(v)), ty::Dynamic(..)) if self.type_var_is_sized(*v) => { debug!("coerce_unsized: have sized infer {:?}", v); coercion.obligations.push(obligation); // `$0: Unsize<dyn Trait>` where we know that `$0: Sized`, try going // for unsizing. } _ => { // Some other case for `$0: Unsize<Something>`. Note that we // hit this case even if `Something` is a sized type, so just // don't do the coercion. debug!("coerce_unsized: ambiguous unsize"); return Err(TypeError::Mismatch); } } } else { debug!("coerce_unsized: early return - ambiguous"); return Err(TypeError::Mismatch); } } Err(traits::Unimplemented) => { debug!("coerce_unsized: early return - can't prove obligation"); return Err(TypeError::Mismatch); } // Object safety violations or miscellaneous. Err(err) => { self.report_selection_error(obligation.clone(), &obligation, &err, false); // Treat this like an obligation and follow through // with the unsizing - the lack of a coercion should // be silent, as it causes a type mismatch later. 
                }
                Ok(Some(impl_source)) => queue.extend(impl_source.nested_obligations()),
            }
        }

        if has_unsized_tuple_coercion && !self.tcx.features().unsized_tuple_coercion {
            feature_err(
                &self.tcx.sess.parse_sess,
                sym::unsized_tuple_coercion,
                self.cause.span,
                "unsized tuple coercion is not stable enough for use and is subject to change",
            )
            .emit();
        }

        if has_trait_upcasting_coercion && !self.tcx().features().trait_upcasting {
            feature_err(
                &self.tcx.sess.parse_sess,
                sym::trait_upcasting,
                self.cause.span,
                "trait upcasting coercion is experimental",
            )
            .emit();
        }

        Ok(coercion)
    }

    fn coerce_from_safe_fn<F, G>(
        &self,
        a: Ty<'tcx>,
        fn_ty_a: ty::PolyFnSig<'tcx>,
        b: Ty<'tcx>,
        to_unsafe: F,
        normal: G,
    ) -> CoerceResult<'tcx>
    where
        F: FnOnce(Ty<'tcx>) -> Vec<Adjustment<'tcx>>,
        G: FnOnce(Ty<'tcx>) -> Vec<Adjustment<'tcx>>,
    {
        if let ty::FnPtr(fn_ty_b) = b.kind() {
            if let (hir::Unsafety::Normal, hir::Unsafety::Unsafe) =
                (fn_ty_a.unsafety(), fn_ty_b.unsafety())
            {
                let unsafe_a = self.tcx.safe_to_unsafe_fn_ty(fn_ty_a);
                return self.unify_and(unsafe_a, b, to_unsafe);
            }
        }
        self.unify_and(a, b, normal)
    }

    fn coerce_from_fn_pointer(
        &self,
        a: Ty<'tcx>,
        fn_ty_a: ty::PolyFnSig<'tcx>,
        b: Ty<'tcx>,
    ) -> CoerceResult<'tcx> {
        //! Attempts to coerce from one Rust function pointer type to
        //! another, inserting an unsafe-fn-pointer cast if needed.

        let b = self.shallow_resolve(b);
        debug!("coerce_from_fn_pointer(a={:?}, b={:?})", a, b);

        self.coerce_from_safe_fn(
            a,
            fn_ty_a,
            b,
            simple(Adjust::Pointer(PointerCast::UnsafeFnPointer)),
            identity,
        )
    }

    fn coerce_from_fn_item(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> CoerceResult<'tcx> {
        //! Attempts to coerce from the type of a Rust function item
        //! into a function pointer.

        let b = self.shallow_resolve(b);
        let InferOk { value: b, mut obligations } =
            self.normalize_associated_types_in_as_infer_ok(self.cause.span, b);
        debug!("coerce_from_fn_item(a={:?}, b={:?})", a, b);

        match b.kind() {
            ty::FnPtr(b_sig) => {
                let a_sig = a.fn_sig(self.tcx);
                // Intrinsics are not coercible to function pointers
                if a_sig.abi() == Abi::RustIntrinsic || a_sig.abi() == Abi::PlatformIntrinsic {
                    return Err(TypeError::IntrinsicCast);
                }

                // Safe `#[target_feature]` functions are not assignable to safe fn pointers (RFC 2396).
                if let ty::FnDef(def_id, _) = *a.kind() {
                    if b_sig.unsafety() == hir::Unsafety::Normal
                        && !self.tcx.codegen_fn_attrs(def_id).target_features.is_empty()
                    {
                        return Err(TypeError::TargetFeatureCast(def_id));
                    }
                }

                let InferOk { value: a_sig, obligations: o1 } =
                    self.normalize_associated_types_in_as_infer_ok(self.cause.span, a_sig);
                obligations.extend(o1);

                let a_fn_pointer = self.tcx.mk_fn_ptr(a_sig);
                let InferOk { value, obligations: o2 } = self.coerce_from_safe_fn(
                    a_fn_pointer,
                    a_sig,
                    b,
                    |unsafe_ty| {
                        vec![
                            Adjustment {
                                kind: Adjust::Pointer(PointerCast::ReifyFnPointer),
                                target: a_fn_pointer,
                            },
                            Adjustment {
                                kind: Adjust::Pointer(PointerCast::UnsafeFnPointer),
                                target: unsafe_ty,
                            },
                        ]
                    },
                    simple(Adjust::Pointer(PointerCast::ReifyFnPointer)),
                )?;

                obligations.extend(o2);
                Ok(InferOk { value, obligations })
            }
            _ => self.unify_and(a, b, identity),
        }
    }

    fn coerce_closure_to_fn(
        &self,
        a: Ty<'tcx>,
        closure_def_id_a: DefId,
        substs_a: SubstsRef<'tcx>,
        b: Ty<'tcx>,
    ) -> CoerceResult<'tcx> {
        //! Attempts to coerce from the type of a non-capturing closure
        //! into a function pointer.

        let b = self.shallow_resolve(b);

        match b.kind() {
            // At this point we haven't done capture analysis, which means
            // that the ClosureSubsts just contains an inference variable instead
            // of tuple of captured types.
            //
            // All we care about here is whether any variable is captured,
            // not the exact paths, so we check `upvars_mentioned` for root
            // variables being captured.
            ty::FnPtr(fn_ty)
                if self
                    .tcx
                    .upvars_mentioned(closure_def_id_a.expect_local())
                    .map_or(true, |u| u.is_empty()) =>
            {
                // We coerce the closure, which has fn type
                //     `extern "rust-call" fn((arg0,arg1,...)) -> _`
                // to
                //     `fn(arg0,arg1,...) -> _`
                // or
                //     `unsafe fn(arg0,arg1,...) -> _`
                let closure_sig = substs_a.as_closure().sig();
                let unsafety = fn_ty.unsafety();
                let pointer_ty =
                    self.tcx.mk_fn_ptr(self.tcx.signature_unclosure(closure_sig, unsafety));
                debug!("coerce_closure_to_fn(a={:?}, b={:?}, pty={:?})", a, b, pointer_ty);
                self.unify_and(
                    pointer_ty,
                    b,
                    simple(Adjust::Pointer(PointerCast::ClosureFnPointer(unsafety))),
                )
            }
            _ => self.unify_and(a, b, identity),
        }
    }

    fn coerce_unsafe_ptr(
        &self,
        a: Ty<'tcx>,
        b: Ty<'tcx>,
        mutbl_b: hir::Mutability,
    ) -> CoerceResult<'tcx> {
        debug!("coerce_unsafe_ptr(a={:?}, b={:?})", a, b);

        let (is_ref, mt_a) = match *a.kind() {
            ty::Ref(_, ty, mutbl) => (true, ty::TypeAndMut { ty, mutbl }),
            ty::RawPtr(mt) => (false, mt),
            _ => return self.unify_and(a, b, identity),
        };
        coerce_mutbls(mt_a.mutbl, mutbl_b)?;

        // Check that the types which they point at are compatible.
        let a_unsafe = self.tcx.mk_ptr(ty::TypeAndMut { mutbl: mutbl_b, ty: mt_a.ty });
        // Although references and unsafe ptrs have the same
        // representation, we still register an Adjust::DerefRef so that
        // regionck knows that the region for `a` must be valid here.
        if is_ref {
            self.unify_and(a_unsafe, b, |target| {
                vec![
                    Adjustment { kind: Adjust::Deref(None), target: mt_a.ty },
                    Adjustment { kind: Adjust::Borrow(AutoBorrow::RawPtr(mutbl_b)), target },
                ]
            })
        } else if mt_a.mutbl != mutbl_b {
            self.unify_and(a_unsafe, b, simple(Adjust::Pointer(PointerCast::MutToConstPointer)))
        } else {
            self.unify_and(a_unsafe, b, identity)
        }
    }
}

impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
    /// Attempt to coerce an expression to a type, and return the
    /// adjusted type of the expression, if successful.
    /// Adjustments are only recorded if the coercion succeeded.
    /// The expressions *must not* have any pre-existing adjustments.
    pub fn try_coerce(
        &self,
        expr: &hir::Expr<'_>,
        expr_ty: Ty<'tcx>,
        target: Ty<'tcx>,
        allow_two_phase: AllowTwoPhase,
        cause: Option<ObligationCause<'tcx>>,
    ) -> RelateResult<'tcx, Ty<'tcx>> {
        let source = self.resolve_vars_with_obligations(expr_ty);
        debug!("coercion::try({:?}: {:?} -> {:?})", expr, source, target);

        let cause =
            cause.unwrap_or_else(|| self.cause(expr.span, ObligationCauseCode::ExprAssignable));
        let coerce = Coerce::new(self, cause, allow_two_phase);
        let ok = self.commit_if_ok(|_| coerce.coerce(source, target))?;

        let (adjustments, _) = self.register_infer_ok_obligations(ok);
        self.apply_adjustments(expr, adjustments);
        Ok(if expr_ty.references_error() { self.tcx.ty_error() } else { target })
    }

    /// Same as `try_coerce()`, but without side-effects.
    pub fn can_coerce(&self, expr_ty: Ty<'tcx>, target: Ty<'tcx>) -> bool {
        let source = self.resolve_vars_with_obligations(expr_ty);
        debug!("coercion::can({:?} -> {:?})", source, target);

        let cause = self.cause(rustc_span::DUMMY_SP, ObligationCauseCode::ExprAssignable);
        // We don't ever need two-phase here since we throw out the result of the coercion
        let coerce = Coerce::new(self, cause, AllowTwoPhase::No);
        self.probe(|_| coerce.coerce(source, target)).is_ok()
    }

    /// Given a type and a target type, this function will calculate and return
    /// how many dereference steps are needed to achieve `expr_ty <: target`.
If /// it's not possible, return `None`. pub fn deref_steps(&self, expr_ty: Ty<'tcx>, target: Ty<'tcx>) -> Option<usize> { let cause = self.cause(rustc_span::DUMMY_SP, ObligationCauseCode::ExprAssignable); // We don't ever need two-phase here since we throw out the result of the coercion let coerce = Coerce::new(self, cause, AllowTwoPhase::No); coerce .autoderef(rustc_span::DUMMY_SP, expr_ty) .find_map(|(ty, steps)| self.probe(|_| coerce.unify(ty, target)).ok().map(|_| steps)) } /// Given some expressions, their known unified type and another expression, /// tries to unify the types, potentially inserting coercions on any of the /// provided expressions and returns their LUB (aka "common supertype"). /// /// This is really an internal helper. From outside the coercion /// module, you should instantiate a `CoerceMany` instance. fn try_find_coercion_lub<E>( &self, cause: &ObligationCause<'tcx>, exprs: &[E], prev_ty: Ty<'tcx>, new: &hir::Expr<'_>, new_ty: Ty<'tcx>, ) -> RelateResult<'tcx, Ty<'tcx>> where E: AsCoercionSite, { let prev_ty = self.resolve_vars_with_obligations(prev_ty); let new_ty = self.resolve_vars_with_obligations(new_ty); debug!( "coercion::try_find_coercion_lub({:?}, {:?}, exprs={:?} exprs)", prev_ty, new_ty, exprs.len() ); // The following check fixes #88097, where the compiler erroneously // attempted to coerce a closure type to itself via a function pointer. if prev_ty == new_ty { return Ok(prev_ty); } // Special-case that coercion alone cannot handle: // Function items or non-capturing closures of differing IDs or InternalSubsts. let (a_sig, b_sig) = { let is_capturing_closure = |ty| { if let &ty::Closure(closure_def_id, _substs) = ty { self.tcx.upvars_mentioned(closure_def_id.expect_local()).is_some() } else { false } }; if is_capturing_closure(prev_ty.kind()) || is_capturing_closure(new_ty.kind()) { (None, None) } else { match (prev_ty.kind(), new_ty.kind()) { (ty::FnDef(..), ty::FnDef(..)) => { // Don't reify if the function types have a LUB, i.e., they // are the same function and their parameters have a LUB. match self .commit_if_ok(|_| self.at(cause, self.param_env).lub(prev_ty, new_ty)) { // We have a LUB of prev_ty and new_ty, just return it. Ok(ok) => return Ok(self.register_infer_ok_obligations(ok)), Err(_) => { (Some(prev_ty.fn_sig(self.tcx)), Some(new_ty.fn_sig(self.tcx))) } } } (ty::Closure(_, substs), ty::FnDef(..)) => { let b_sig = new_ty.fn_sig(self.tcx); let a_sig = self .tcx .signature_unclosure(substs.as_closure().sig(), b_sig.unsafety()); (Some(a_sig), Some(b_sig)) } (ty::FnDef(..), ty::Closure(_, substs)) => { let a_sig = prev_ty.fn_sig(self.tcx); let b_sig = self .tcx .signature_unclosure(substs.as_closure().sig(), a_sig.unsafety()); (Some(a_sig), Some(b_sig)) } (ty::Closure(_, substs_a), ty::Closure(_, substs_b)) => ( Some(self.tcx.signature_unclosure( substs_a.as_closure().sig(), hir::Unsafety::Normal, )), Some(self.tcx.signature_unclosure( substs_b.as_closure().sig(), hir::Unsafety::Normal, )), ), _ => (None, None), } } }; if let (Some(a_sig), Some(b_sig)) = (a_sig, b_sig) { // Intrinsics are not coercible to function pointers. if a_sig.abi() == Abi::RustIntrinsic || a_sig.abi() == Abi::PlatformIntrinsic || b_sig.abi() == Abi::RustIntrinsic || b_sig.abi() == Abi::PlatformIntrinsic { return Err(TypeError::IntrinsicCast); } // The signature must match. 
let a_sig = self.normalize_associated_types_in(new.span, a_sig); let b_sig = self.normalize_associated_types_in(new.span, b_sig); let sig = self .at(cause, self.param_env) .trace(prev_ty, new_ty) .lub(a_sig, b_sig) .map(|ok| self.register_infer_ok_obligations(ok))?; // Reify both sides and return the reified fn pointer type. let fn_ptr = self.tcx.mk_fn_ptr(sig); let prev_adjustment = match prev_ty.kind() { ty::Closure(..) => Adjust::Pointer(PointerCast::ClosureFnPointer(a_sig.unsafety())), ty::FnDef(..) => Adjust::Pointer(PointerCast::ReifyFnPointer), _ => unreachable!(), }; let next_adjustment = match new_ty.kind() { ty::Closure(..) => Adjust::Pointer(PointerCast::ClosureFnPointer(b_sig.unsafety())), ty::FnDef(..) => Adjust::Pointer(PointerCast::ReifyFnPointer), _ => unreachable!(), }; for expr in exprs.iter().map(|e| e.as_coercion_site()) { self.apply_adjustments( expr, vec![Adjustment { kind: prev_adjustment.clone(), target: fn_ptr }], ); } self.apply_adjustments(new, vec![Adjustment { kind: next_adjustment, target: fn_ptr }]); return Ok(fn_ptr); } // Configure a Coerce instance to compute the LUB. // We don't allow two-phase borrows on any autorefs this creates since we // probably aren't processing function arguments here and even if we were, // they're going to get autorefed again anyway and we can apply 2-phase borrows // at that time. let mut coerce = Coerce::new(self, cause.clone(), AllowTwoPhase::No); coerce.use_lub = true; // First try to coerce the new expression to the type of the previous ones, // but only if the new expression has no coercion already applied to it. let mut first_error = None; if !self.typeck_results.borrow().adjustments().contains_key(new.hir_id) { let result = self.commit_if_ok(|_| coerce.coerce(new_ty, prev_ty)); match result { Ok(ok) => { let (adjustments, target) = self.register_infer_ok_obligations(ok); self.apply_adjustments(new, adjustments); debug!( "coercion::try_find_coercion_lub: was able to coerce from previous type {:?} to new type {:?}", prev_ty, new_ty, ); return Ok(target); } Err(e) => first_error = Some(e), } } // Then try to coerce the previous expressions to the type of the new one. // This requires ensuring there are no coercions applied to *any* of the // previous expressions, other than noop reborrows (ignoring lifetimes). for expr in exprs { let expr = expr.as_coercion_site(); let noop = match self.typeck_results.borrow().expr_adjustments(expr) { &[Adjustment { kind: Adjust::Deref(_), .. }, Adjustment { kind: Adjust::Borrow(AutoBorrow::Ref(_, mutbl_adj)), .. }] => { match *self.node_ty(expr.hir_id).kind() { ty::Ref(_, _, mt_orig) => { let mutbl_adj: hir::Mutability = mutbl_adj.into(); // Reborrow that we can safely ignore, because // the next adjustment can only be a Deref // which will be merged into it. mutbl_adj == mt_orig } _ => false, } } &[Adjustment { kind: Adjust::NeverToAny, .. }] | &[] => true, _ => false, }; if !noop { debug!( "coercion::try_find_coercion_lub: older expression {:?} had adjustments, requiring LUB", expr, ); return self .commit_if_ok(|_| self.at(cause, self.param_env).lub(prev_ty, new_ty)) .map(|ok| self.register_infer_ok_obligations(ok)); } } match self.commit_if_ok(|_| coerce.coerce(prev_ty, new_ty)) { Err(_) => { // Avoid giving strange errors on failed attempts. 
                if let Some(e) = first_error {
                    Err(e)
                } else {
                    self.commit_if_ok(|_| self.at(cause, self.param_env).lub(prev_ty, new_ty))
                        .map(|ok| self.register_infer_ok_obligations(ok))
                }
            }
            Ok(ok) => {
                debug!(
                    "coercion::try_find_coercion_lub: was able to coerce previous type {:?} to new type {:?}",
                    prev_ty, new_ty,
                );
                let (adjustments, target) = self.register_infer_ok_obligations(ok);
                for expr in exprs {
                    let expr = expr.as_coercion_site();
                    self.apply_adjustments(expr, adjustments.clone());
                }
                Ok(target)
            }
        }
    }
}

/// CoerceMany encapsulates the pattern you should use when you have
/// many expressions that are all getting coerced to a common
/// type. This arises, for example, when you have a match (the result
/// of each arm is coerced to a common type). It also arises in less
/// obvious places, such as when you have many `break foo` expressions
/// that target the same loop, or the various `return` expressions in
/// a function.
///
/// The basic protocol is as follows:
///
/// - Instantiate the `CoerceMany` with an initial `expected_ty`.
///   This will also serve as the "starting LUB". The expectation is
///   that this type is something which all of the expressions *must*
///   be coercible to. Use a fresh type variable if needed.
/// - For each expression whose result is to be coerced, invoke `coerce()` with
///   that expression and its type.
/// - In some cases we wish to coerce "non-expressions" whose types are implicitly
///   unit. This happens for example if you have a `break` with no expression,
///   or an `if` with no `else`. In that case, invoke `coerce_forced_unit()`.
/// - `coerce()` and `coerce_forced_unit()` may report errors. They hide this
///   from you so that you don't have to worry your pretty head about it.
///   But if an error is reported, the final type will be `err`.
/// - Invoking `coerce()` may cause us to go and adjust the "adjustments" on
///   previously coerced expressions.
/// - When all done, invoke `complete()`. This will return the LUB of
///   all your expressions.
///   - WARNING: I don't believe this final type is guaranteed to be
///     related to your initial `expected_ty` in any particular way,
///     although it will typically be a subtype, so you should check it.
///   - Invoking `complete()` may cause us to go and adjust the "adjustments" on
///     previously coerced expressions.
///
/// Example:
///
/// ```
/// let mut coerce = CoerceMany::new(expected_ty);
/// for expr in exprs {
///     let expr_ty = fcx.check_expr_with_expectation(expr, expected);
///     coerce.coerce(fcx, &cause, expr, expr_ty);
/// }
/// let final_ty = coerce.complete(fcx);
/// ```
pub struct CoerceMany<'tcx, 'exprs, E: AsCoercionSite> {
    expected_ty: Ty<'tcx>,
    final_ty: Option<Ty<'tcx>>,
    expressions: Expressions<'tcx, 'exprs, E>,
    pushed: usize,
}

/// The type of a `CoerceMany` that is storing up the expressions into
/// a buffer. We use this in `check/mod.rs` for things like `break`.
pub type DynamicCoerceMany<'tcx> = CoerceMany<'tcx, 'tcx, &'tcx hir::Expr<'tcx>>;

enum Expressions<'tcx, 'exprs, E: AsCoercionSite> {
    Dynamic(Vec<&'tcx hir::Expr<'tcx>>),
    UpFront(&'exprs [E]),
}

impl<'tcx, 'exprs, E: AsCoercionSite> CoerceMany<'tcx, 'exprs, E> {
    /// The usual case; collect the set of expressions dynamically.
    /// If the full set of coercion sites is known beforehand,
    /// consider `with_coercion_sites()` instead to avoid allocation.
    pub fn new(expected_ty: Ty<'tcx>) -> Self {
        Self::make(expected_ty, Expressions::Dynamic(vec![]))
    }

    /// As an optimization, you can create a `CoerceMany` with a
    /// pre-existing slice of expressions.
In this case, you are
    /// expected to pass each element in the slice to `coerce(...)` in
    /// order. This is used with arrays in particular to avoid
    /// needlessly cloning the slice.
    pub fn with_coercion_sites(expected_ty: Ty<'tcx>, coercion_sites: &'exprs [E]) -> Self {
        Self::make(expected_ty, Expressions::UpFront(coercion_sites))
    }

    fn make(expected_ty: Ty<'tcx>, expressions: Expressions<'tcx, 'exprs, E>) -> Self {
        CoerceMany { expected_ty, final_ty: None, expressions, pushed: 0 }
    }

    /// Returns the "expected type" with which this coercion was
    /// constructed. This represents the "downward propagated" type
    /// that was given to us at the start of typing whatever construct
    /// we are typing (e.g., the match expression).
    ///
    /// Typically, this is used as the expected type when
    /// type-checking each of the alternative expressions whose types
    /// we are trying to merge.
    pub fn expected_ty(&self) -> Ty<'tcx> {
        self.expected_ty
    }

    /// Returns the current "merged type", representing our best-guess
    /// at the LUB of the expressions we've seen so far (if any). This
    /// isn't *final* until you call `self.complete()`, which will return
    /// the merged type.
    pub fn merged_ty(&self) -> Ty<'tcx> {
        self.final_ty.unwrap_or(self.expected_ty)
    }

    /// Indicates that the value generated by `expression`, which is
    /// of type `expression_ty`, is one of the possibilities that we
    /// could coerce from. This will record `expression`, and later
    /// calls to `coerce` may come back and add adjustments and things
    /// if necessary.
    pub fn coerce<'a>(
        &mut self,
        fcx: &FnCtxt<'a, 'tcx>,
        cause: &ObligationCause<'tcx>,
        expression: &'tcx hir::Expr<'tcx>,
        expression_ty: Ty<'tcx>,
    ) {
        self.coerce_inner(fcx, cause, Some(expression), expression_ty, None, false)
    }

    /// Indicates that one of the inputs is a "forced unit". This
    /// occurs in a case like `if foo { ... };`, where the missing else
    /// generates a "forced unit". Another example is a `loop { break;
    /// }`, where the `break` has no argument expression. We treat
    /// these cases slightly differently for error-reporting
    /// purposes. Note that these tend to correspond to cases where
    /// the `()` expression is implicit in the source, and hence we do
    /// not take an expression argument.
    ///
    /// The `augment_error` gives you a chance to extend the error
    /// message, in case one results (e.g., we use this to suggest
    /// removing a `;`).
    pub fn coerce_forced_unit<'a>(
        &mut self,
        fcx: &FnCtxt<'a, 'tcx>,
        cause: &ObligationCause<'tcx>,
        augment_error: &mut dyn FnMut(&mut DiagnosticBuilder<'_>),
        label_unit_as_expected: bool,
    ) {
        self.coerce_inner(
            fcx,
            cause,
            None,
            fcx.tcx.mk_unit(),
            Some(augment_error),
            label_unit_as_expected,
        )
    }

    /// The inner coercion "engine". If `expression` is `None`, this
    /// is a forced-unit case, and hence `expression_ty` must be
    /// `Nil`.
    #[instrument(skip(self, fcx, augment_error, label_expression_as_expected), level = "debug")]
    crate fn coerce_inner<'a>(
        &mut self,
        fcx: &FnCtxt<'a, 'tcx>,
        cause: &ObligationCause<'tcx>,
        expression: Option<&'tcx hir::Expr<'tcx>>,
        mut expression_ty: Ty<'tcx>,
        augment_error: Option<&mut dyn FnMut(&mut DiagnosticBuilder<'_>)>,
        label_expression_as_expected: bool,
    ) {
        // Incorporate whatever type inference information we have
        // until now; in principle we might also want to process
        // pending obligations, but doing so should only improve
        // compatibility (hopefully that is true) by helping us
        // uncover never types better.
if expression_ty.is_ty_var() { expression_ty = fcx.infcx.shallow_resolve(expression_ty); } // If we see any error types, just propagate that error // upwards. if expression_ty.references_error() || self.merged_ty().references_error() { self.final_ty = Some(fcx.tcx.ty_error()); return; } // Handle the actual type unification etc. let result = if let Some(expression) = expression { if self.pushed == 0 { // Special-case the first expression we are coercing. // To be honest, I'm not entirely sure why we do this. // We don't allow two-phase borrows, see comment in try_find_coercion_lub for why fcx.try_coerce( expression, expression_ty, self.expected_ty, AllowTwoPhase::No, Some(cause.clone()), ) } else { match self.expressions { Expressions::Dynamic(ref exprs) => fcx.try_find_coercion_lub( cause, exprs, self.merged_ty(), expression, expression_ty, ), Expressions::UpFront(ref coercion_sites) => fcx.try_find_coercion_lub( cause, &coercion_sites[0..self.pushed], self.merged_ty(), expression, expression_ty, ), } } } else { // this is a hack for cases where we default to `()` because // the expression etc has been omitted from the source. An // example is an `if let` without an else: // // if let Some(x) = ... { } // // we wind up with a second match arm that is like `_ => // ()`. That is the case we are considering here. We take // a different path to get the right "expected, found" // message and so forth (and because we know that // `expression_ty` will be unit). // // Another example is `break` with no argument expression. assert!(expression_ty.is_unit(), "if let hack without unit type"); fcx.at(cause, fcx.param_env) .eq_exp(label_expression_as_expected, expression_ty, self.merged_ty()) .map(|infer_ok| { fcx.register_infer_ok_obligations(infer_ok); expression_ty }) }; match result { Ok(v) => { self.final_ty = Some(v); if let Some(e) = expression { match self.expressions { Expressions::Dynamic(ref mut buffer) => buffer.push(e), Expressions::UpFront(coercion_sites) => { // if the user gave us an array to validate, check that we got // the next expression in the list, as expected assert_eq!( coercion_sites[self.pushed].as_coercion_site().hir_id, e.hir_id ); } } self.pushed += 1; } } Err(coercion_error) => { let (expected, found) = if label_expression_as_expected { // In the case where this is a "forced unit", like // `break`, we want to call the `()` "expected" // since it is implied by the syntax. 
                    // (Note: not all force-units work this way.)
                    (expression_ty, self.final_ty.unwrap_or(self.expected_ty))
                } else {
                    // Otherwise, the "expected" type for error
                    // reporting is the current unification type,
                    // which is basically the LUB of the expressions
                    // we've seen so far (combined with the expected
                    // type)
                    (self.final_ty.unwrap_or(self.expected_ty), expression_ty)
                };

                let mut err;
                let mut unsized_return = false;
                match cause.code {
                    ObligationCauseCode::ReturnNoExpression => {
                        err = struct_span_err!(
                            fcx.tcx.sess,
                            cause.span,
                            E0069,
                            "`return;` in a function whose return type is not `()`"
                        );
                        err.span_label(cause.span, "return type is not `()`");
                    }
                    ObligationCauseCode::BlockTailExpression(blk_id) => {
                        let parent_id = fcx.tcx.hir().get_parent_node(blk_id);
                        err = self.report_return_mismatched_types(
                            cause,
                            expected,
                            found,
                            coercion_error,
                            fcx,
                            parent_id,
                            expression.map(|expr| (expr, blk_id)),
                        );
                        if !fcx.tcx.features().unsized_locals {
                            unsized_return = self.is_return_ty_unsized(fcx, blk_id);
                        }
                    }
                    ObligationCauseCode::ReturnValue(id) => {
                        err = self.report_return_mismatched_types(
                            cause, expected, found, coercion_error, fcx, id, None,
                        );
                        if !fcx.tcx.features().unsized_locals {
                            let id = fcx.tcx.hir().get_parent_node(id);
                            unsized_return = self.is_return_ty_unsized(fcx, id);
                        }
                    }
                    _ => {
                        err = fcx.report_mismatched_types(cause, expected, found, coercion_error);
                    }
                }

                if let Some(augment_error) = augment_error {
                    augment_error(&mut err);
                }

                if let Some(expr) = expression {
                    fcx.emit_coerce_suggestions(&mut err, expr, found, expected, None);
                }

                // Error possibly reported in `check_assign` so avoid emitting error again.
                let assign_to_bool = expression
                    // #67273: Use initial expected type as opposed to `expected`.
                    // Otherwise we end up using prior coercions in e.g. a `match` expression:
                    // ```
                    // match i {
                    //     0 => true, // Because of this...
                    //     1 => i = 1, // ...`expected == bool` now, but not when checking `i = 1`.
                    //     _ => (),
                    // };
                    // ```
                    .filter(|e| fcx.is_assign_to_bool(e, self.expected_ty()))
                    .is_some();

                err.emit_unless(assign_to_bool || unsized_return);

                self.final_ty = Some(fcx.tcx.ty_error());
            }
        }
    }

    fn report_return_mismatched_types<'a>(
        &self,
        cause: &ObligationCause<'tcx>,
        expected: Ty<'tcx>,
        found: Ty<'tcx>,
        ty_err: TypeError<'tcx>,
        fcx: &FnCtxt<'a, 'tcx>,
        id: hir::HirId,
        expression: Option<(&'tcx hir::Expr<'tcx>, hir::HirId)>,
    ) -> DiagnosticBuilder<'a> {
        let mut err = fcx.report_mismatched_types(cause, expected, found, ty_err);

        let mut pointing_at_return_type = false;
        let mut fn_output = None;

        // Verify that this is a tail expression of a function, otherwise the
        // label pointing out the cause for the type coercion will be wrong
        // as prior return coercions would not be relevant (#57664).
        let parent_id = fcx.tcx.hir().get_parent_node(id);
        let fn_decl = if let Some((expr, blk_id)) = expression {
            pointing_at_return_type =
                fcx.suggest_mismatched_types_on_tail(&mut err, expr, expected, found, blk_id);
            let parent = fcx.tcx.hir().get(parent_id);
            if let (Some(cond_expr), true, false) = (
                fcx.tcx.hir().get_if_cause(expr.hir_id),
                expected.is_unit(),
                pointing_at_return_type,
            ) {
                // If the block is from an external macro or try (`?`) desugaring, then
                // do not suggest adding a semicolon, because there's nowhere to put it.
                // See issues #81943 and #87051.
                if cond_expr.span.desugaring_kind().is_none()
                    && !in_external_macro(fcx.tcx.sess, cond_expr.span)
                    && !matches!(
                        cond_expr.kind,
                        hir::ExprKind::Match(.., hir::MatchSource::TryDesugar)
                    )
                {
                    err.span_label(cond_expr.span, "expected this to be `()`");
                    if expr.can_have_side_effects() {
                        fcx.suggest_semicolon_at_end(cond_expr.span, &mut err);
                    }
                }
            }
            fcx.get_node_fn_decl(parent).map(|(fn_decl, _, is_main)| (fn_decl, is_main))
        } else {
            fcx.get_fn_decl(parent_id)
        };

        if let Some((fn_decl, can_suggest)) = fn_decl {
            if expression.is_none() {
                pointing_at_return_type |= fcx.suggest_missing_return_type(
                    &mut err,
                    &fn_decl,
                    expected,
                    found,
                    can_suggest,
                    fcx.tcx.hir().get_parent_item(id),
                );
            }
            if !pointing_at_return_type {
                fn_output = Some(&fn_decl.output); // `impl Trait` return type
            }
        }

        let parent_id = fcx.tcx.hir().get_parent_item(id);
        let parent_item = fcx.tcx.hir().get(parent_id);

        if let (Some((expr, _)), Some((fn_decl, _, _))) =
            (expression, fcx.get_node_fn_decl(parent_item))
        {
            fcx.suggest_missing_break_or_return_expr(
                &mut err, expr, fn_decl, expected, found, id, parent_id,
            );
        }

        if let (Some(sp), Some(fn_output)) = (fcx.ret_coercion_span.get(), fn_output) {
            self.add_impl_trait_explanation(&mut err, cause, fcx, expected, sp, fn_output);
        }

        err
    }

    fn add_impl_trait_explanation<'a>(
        &self,
        err: &mut DiagnosticBuilder<'a>,
        cause: &ObligationCause<'tcx>,
        fcx: &FnCtxt<'a, 'tcx>,
        expected: Ty<'tcx>,
        sp: Span,
        fn_output: &hir::FnRetTy<'_>,
    ) {
        let return_sp = fn_output.span();
        err.span_label(return_sp, "expected because this return type...");
        err.span_label(
            sp,
            format!("...is found to be `{}` here", fcx.resolve_vars_with_obligations(expected)),
        );
        let impl_trait_msg = "for information on `impl Trait`, see \
                <https://doc.rust-lang.org/book/ch10-02-traits.html\
                #returning-types-that-implement-traits>";
        let trait_obj_msg = "for information on trait objects, see \
                <https://doc.rust-lang.org/book/ch17-02-trait-objects.html\
                #using-trait-objects-that-allow-for-values-of-different-types>";
        err.note("to return `impl Trait`, all returned values must be of the same type");
        err.note(impl_trait_msg);
        let snippet = fcx
            .tcx
            .sess
            .source_map()
            .span_to_snippet(return_sp)
            .unwrap_or_else(|_| "dyn Trait".to_string());
        let mut snippet_iter = snippet.split_whitespace();
        let has_impl = snippet_iter.next().map_or(false, |s| s == "impl");
        // Only suggest `Box<dyn Trait>` if `Trait` in `impl Trait` is object safe.
        let mut is_object_safe = false;
        if let hir::FnRetTy::Return(ty) = fn_output {
            // Get the return type.
            if let hir::TyKind::OpaqueDef(..) = ty.kind {
                let ty = <dyn AstConv<'_>>::ast_ty_to_ty(fcx, ty);
                // Get the `impl Trait`'s `DefId`.
                if let ty::Opaque(def_id, _) = ty.kind() {
                    let hir_id = fcx.tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
                    // Get the `impl Trait`'s `Item` so that we can get its trait bounds and
                    // get the `Trait`'s `DefId`.
                    if let hir::ItemKind::OpaqueTy(hir::OpaqueTy { bounds, .. }) =
                        fcx.tcx.hir().expect_item(hir_id).kind
                    {
                        // Are all of this `impl Trait`'s traits object safe?
is_object_safe = bounds.iter().all(|bound| { bound .trait_ref() .and_then(|t| t.trait_def_id()) .map_or(false, |def_id| { fcx.tcx.object_safety_violations(def_id).is_empty() }) }) } } } }; if has_impl { if is_object_safe { err.multipart_suggestion( "you could change the return type to be a boxed trait object", vec![ (return_sp.with_hi(return_sp.lo() + BytePos(4)), "Box<dyn".to_string()), (return_sp.shrink_to_hi(), ">".to_string()), ], Applicability::MachineApplicable, ); let sugg = vec![sp, cause.span] .into_iter() .flat_map(|sp| { vec![ (sp.shrink_to_lo(), "Box::new(".to_string()), (sp.shrink_to_hi(), ")".to_string()), ] .into_iter() }) .collect::<Vec<_>>(); err.multipart_suggestion( "if you change the return type to expect trait objects, box the returned \ expressions", sugg, Applicability::MaybeIncorrect, ); } else { err.help(&format!( "if the trait `{}` were object safe, you could return a boxed trait object", &snippet[5..] )); } err.note(trait_obj_msg); } err.help("you could instead create a new `enum` with a variant for each returned type"); } fn is_return_ty_unsized(&self, fcx: &FnCtxt<'a, 'tcx>, blk_id: hir::HirId) -> bool { if let Some((fn_decl, _)) = fcx.get_fn_decl(blk_id) { if let hir::FnRetTy::Return(ty) = fn_decl.output { let ty = <dyn AstConv<'_>>::ast_ty_to_ty(fcx, ty); if let ty::Dynamic(..) = ty.kind() { return true; } } } false } pub fn complete<'a>(self, fcx: &FnCtxt<'a, 'tcx>) -> Ty<'tcx> { if let Some(final_ty) = self.final_ty { final_ty } else { // If we only had inputs that were of type `!` (or no // inputs at all), then the final type is `!`. assert_eq!(self.pushed, 0); fcx.tcx.types.never } } } /// Something that can be converted into an expression to which we can /// apply a coercion. pub trait AsCoercionSite { fn as_coercion_site(&self) -> &hir::Expr<'_>; } impl AsCoercionSite for hir::Expr<'_> { fn as_coercion_site(&self) -> &hir::Expr<'_> { self } } impl<'a, T> AsCoercionSite for &'a T where T: AsCoercionSite, { fn as_coercion_site(&self) -> &hir::Expr<'_> { (**self).as_coercion_site() } } impl AsCoercionSite for ! { fn as_coercion_site(&self) -> &hir::Expr<'_> { unreachable!() } } impl AsCoercionSite for hir::Arm<'_> { fn as_coercion_site(&self) -> &hir::Expr<'_> { &self.body } }
} /// This always returns `Ok(...)`. fn success<'tcx>(
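As a bridge from the machinery above to the source level, here is a small
self-contained sketch (illustrative code only, not part of rustc) of the
coercions these functions implement:

fn takes_slice(_: &[i32]) {}
fn takes_fn_ptr(_: fn(i32) -> i32) {}
fn double(x: i32) -> i32 { x * 2 }

fn main() {
    // Unsized coercion, handled by coerce_unsized: &[i32; 3] -> &[i32].
    let arr = [1, 2, 3];
    takes_slice(&arr);

    // Reborrow/autoref, handled by coerce_borrowed_pointer: &mut i32 -> &i32.
    let mut n = 5;
    let m: &mut i32 = &mut n;
    let _r: &i32 = m;

    // Function item -> function pointer (coerce_from_fn_item, ReifyFnPointer).
    takes_fn_ptr(double);

    // Non-capturing closure -> function pointer (coerce_closure_to_fn).
    takes_fn_ptr(|x| x + 1);
}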
proxy.rs
//! A proxy that forwards data to another server and forwards that server's
//! responses back to clients.
//!
//! Because the Tokio runtime uses a thread pool, each TCP connection is
//! processed concurrently with all other TCP connections across multiple
//! threads.
//!
//! You can showcase this by running the following in one terminal:
//!
//!     cargo run --example proxy
//!
//! then this in another terminal:
//!
//!     cargo run --example echo
//!
//! and finally this in another terminal:
//!
//!     cargo run --example connect 127.0.0.1:8081
//!
//! This final terminal will connect to our proxy, which will in turn connect to
//! the echo server, and you'll be able to see data flowing between them.

#![warn(rust_2018_idioms)]

use tokio::io;
use tokio::net::{TcpListener, TcpStream};

use futures::future::try_join;
use futures::FutureExt;

use std::env;
use std::error::Error;

#[tokio::main]
async fn main() -> Result<(), Box<dyn Error>> {
    let listen_addr = env::args()
        .nth(1)
        .unwrap_or_else(|| "127.0.0.1:8081".to_string());
    let server_addr = env::args()
        .nth(2)
        .unwrap_or_else(|| "127.0.0.1:8080".to_string());

    println!("Listening on: {}", listen_addr);
while let Ok((inbound, _)) = listener.accept().await { let transfer = transfer(inbound, server_addr.clone()).map(|r| { if let Err(e) = r { println!("Failed to transfer; error={}", e); } }); tokio::spawn(transfer); } Ok(()) } async fn transfer(mut inbound: TcpStream, proxy_addr: String) -> Result<(), Box<dyn Error>> { let mut outbound = TcpStream::connect(proxy_addr).await?; let (mut ri, mut wi) = inbound.split(); let (mut ro, mut wo) = outbound.split(); let client_to_server = io::copy(&mut ri, &mut wo); let server_to_client = io::copy(&mut ro, &mut wi); try_join(client_to_server, server_to_client).await?; Ok(()) }
println!("Proxying to: {}", server_addr); let mut listener = TcpListener::bind(listen_addr).await?;
bubblesort_worst_case.rs
use criterion::{black_box, criterion_group, criterion_main, Criterion};
use oxcart::list::List;
use oxcart::arraylist::ArrayList;
use oxcart::bubblesort::bubblesort;

fn cmp_leq<T: Eq + Ord>(a: &T, b: &T) -> bool {
    a <= b
}

fn criterion_benchmark(c: &mut Criterion) {
    let mut list: ArrayList<u64> = ArrayList::new();
    /* reverse the range so the list starts in descending order,
       the worst case for an ascending bubblesort */
    for i in (0..1000).rev() {
|b| b.iter(|| bubblesort(&mut list, cmp_leq))); } criterion_group!(benches, criterion_benchmark); criterion_main!(benches);
list.append(i); } c.bench_function("bubblesort_worst_case 1000",
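One caveat with the benchmark above: `bubblesort` sorts `list` in place, so
after the first iteration the input is already ascending and later iterations
no longer exercise the worst case (the imported `black_box` also goes unused).
A sketch using criterion's `iter_batched` to rebuild the input per batch; it
assumes `ArrayList` implements `Clone`, which is not shown in this file:

use criterion::{black_box, criterion_group, criterion_main, BatchSize, Criterion};
use oxcart::list::List;
use oxcart::arraylist::ArrayList;
use oxcart::bubblesort::bubblesort;

fn cmp_leq<T: Eq + Ord>(a: &T, b: &T) -> bool { a <= b }

fn criterion_benchmark(c: &mut Criterion) {
    let mut worst: ArrayList<u64> = ArrayList::new();
    for i in (0..1000).rev() {
        worst.append(i);
    }
    c.bench_function("bubblesort_worst_case 1000 (fresh input)", |b| {
        b.iter_batched(
            || worst.clone(),                               // setup: fresh reversed list
            |mut l| bubblesort(black_box(&mut l), cmp_leq), // measured routine only
            BatchSize::SmallInput,
        )
    });
}

criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);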
unit.test.js
const AsyncTestUtil = require('async-test-util');
const assert = require('assert');
const EthCrypto = require('../dist/lib/index');

const TEST_DATA = {
    address: '0x3f243FdacE01Cfd9719f7359c94BA11361f32471',
    privateKey: '0x107be946709e41b7895eea9f2dacf998a0a9124acbb786f0fd1a826101581a07',
    publicKey: 'bf1cc3154424dc22191941d9f4f50b063a2b663a2337e5548abea633c1d06eceacf2b81dd326d278cd992d5e03b0df140f2df389ac9a1c2415a220a4a9e8c046'
};

const HEX_STRING = '0x55030130e79efc853f8644d32c11a58d47018cc3a08a16ac4fb9c09af4a634b16d1e37f44c60be0001670b7147dbacc6e057ac7595d74ecfd7ff58a593ee9db3cee601ee06234d200e1f2e35533533754ecbf910b86c1b7fc556b1cc2516f6dd3a25360bcd68f1af4f9450952cc9ef53de5b0c42f8f07976a05d0cfc0ee21acda7ad31cc77640fdd55349c460f94d71656e79048e5991aeb8852ad094bc96e8983232710f5b983ba07bc542ac3f4116a5d066b965e9071cb9912ed1a3da98cdd06e5ef75738fb915a6cef05497f49215bba156c2ba525b2a268be95c3efabb3f1d10fc3b3a57f8a06ef048735a5f3cf9fbbe2203b1b39568ff99e78094bf78c61514ebcbdc75fa90e7d06bc11a49959c2c4632d87384a2667f06e03216bba3b345af2cf89c439c12d4c24dc392d3ffdc9e807b00772b99299178415966d86b59478f21ae005e74c68057d5a3ccbefa08';

describe('unit.test.js', () => {
    describe('.createIdentity()', () => {
        it('should create an identity', () => {
            const ident = EthCrypto.createIdentity();
            assert.equal(typeof ident.privateKey, 'string');
            assert.equal(typeof ident.publicKey, 'string');
            assert.equal(typeof ident.address, 'string');
        });
        it('2 identities should never be equal', () => {
            const ident = EthCrypto.createIdentity();
            const ident2 = EthCrypto.createIdentity();
            assert.notEqual(ident.privateKey, ident2.privateKey);
        });
        it('should create an identity from the buffer', () => {
            const pseudoRand = Buffer.from(AsyncTestUtil.randomString(128), 'utf-8');
            const ident = EthCrypto.createIdentity(pseudoRand);
            assert.equal(typeof ident.privateKey, 'string');
            assert.equal(typeof ident.publicKey, 'string');
            assert.equal(typeof ident.address, 'string');
        });
        it('two identities from the same buffer should be equal', () => {
            const pseudoRand = Buffer.from(AsyncTestUtil.randomString(128), 'utf-8');
            const ident = EthCrypto.createIdentity(pseudoRand);
            const ident2 = EthCrypto.createIdentity(pseudoRand);
            assert.equal(ident.privateKey, ident2.privateKey);
        });
        it('two identities from a different buffer must not be equal', () => {
            const pseudoRand = Buffer.from(AsyncTestUtil.randomString(128), 'utf-8');
            const ident = EthCrypto.createIdentity(pseudoRand);
            const pseudoRand2 = Buffer.from(AsyncTestUtil.randomString(128), 'utf-8');
            const ident2 = EthCrypto.createIdentity(pseudoRand2);
            assert.notEqual(ident.privateKey, ident2.privateKey);
        });
        it('should throw when entropy is too small', async () => {
            const pseudoRand = Buffer.from(AsyncTestUtil.randomString(4), 'utf-8');
            await AsyncTestUtil.assertThrows(
                () => EthCrypto.createIdentity(pseudoRand),
                Error,
                'Entropy-size must be at least'
            );
        });
        it('should throw when entropy is no buffer', async () => {
            const pseudoRand = AsyncTestUtil.randomString(128);
            await AsyncTestUtil.assertThrows(
                () => EthCrypto.createIdentity(pseudoRand),
                Error,
                'is no Buffer'
            );
        });
    });
    describe('.publicKeyByPrivateKey()', () => {
        describe('positive', () => {
            it('should give the correct publicKey', () => {
                const publicKey = EthCrypto.publicKeyByPrivateKey(TEST_DATA.privateKey);
                assert.equal(publicKey, TEST_DATA.publicKey);
            });
            it('should auto-prefix 0x', () => {
                const noPrefixPrivate = '43137cdb869f4375abfce46910aa24d528b2152c5a396158550158fbdb160b4f';
                const publicKey = EthCrypto.publicKeyByPrivateKey(noPrefixPrivate);
const publicKey2 = EthCrypto.publicKeyByPrivateKey('0x' + noPrefixPrivate); assert.equal(publicKey, publicKey2); }); }); describe('negative', () => { it('should crash when non-key given', () => { assert.throws( () => EthCrypto.publicKeyByPrivateKey( AsyncTestUtil.randomString(12) ) ); }); }); }); describe('.sign()', () => { describe('positive', () => { it('should sign the data', () => { const message = AsyncTestUtil.randomString(12); const messageHash = EthCrypto.hash.keccak256(message); const signature = EthCrypto.sign(TEST_DATA.privateKey, messageHash); assert.equal(typeof signature, 'string'); assert.ok(signature.length > 10); }); }); describe('negative', () => { it('should not sign with wrong key', () => { assert.throws( () => EthCrypto.sign( 'XXX' + AsyncTestUtil.randomString(222), AsyncTestUtil.randomString(12) ) ); }); it('should throw when non-hash given', () => { assert.throws( () => EthCrypto.sign( TEST_DATA.privateKey, AsyncTestUtil.randomString(5) ) ); }); }); }); describe('.recover()', () => { describe('positive', () => { it('should return the correct address', () => { const message = AsyncTestUtil.randomString(12); const messageHash = EthCrypto.hash.keccak256(message); const signature = EthCrypto.sign(TEST_DATA.privateKey, messageHash); const address = EthCrypto.recover(signature, messageHash); assert.equal(address, TEST_DATA.address); }); }); describe('negative', () => {}); }); describe('.recoverPublicKey()', () => { it('should recover the correct key', async () => { const message = AsyncTestUtil.randomString(12); const messageHash = EthCrypto.hash.keccak256(message); const signature = EthCrypto.sign(TEST_DATA.privateKey, messageHash); const publicKey = EthCrypto.recoverPublicKey(signature, messageHash); assert.equal(publicKey, TEST_DATA.publicKey); }); }); describe('.encryptWithPublicKey()', () => { describe('positive', () => { it('should encrypt the data', async () => { const message = AsyncTestUtil.randomString(12); const encrypted = await EthCrypto.encryptWithPublicKey( TEST_DATA.publicKey, message ); assert.equal(typeof encrypted.iv, 'string'); assert.equal(typeof encrypted.ephemPublicKey, 'string'); assert.equal(typeof encrypted.ciphertext, 'string'); assert.equal(typeof encrypted.mac, 'string'); }); it('should also work with compressed keys', async () => { const message = AsyncTestUtil.randomString(12); const ident = EthCrypto.createIdentity(); const compressed = EthCrypto.publicKey.compress(ident.publicKey); const encrypted = await EthCrypto.encryptWithPublicKey( compressed, message ); const decrypted = await EthCrypto.decryptWithPrivateKey( ident.privateKey, encrypted ); assert.equal(decrypted, message); }); }); describe('negative', () => { it('should throw when non-key given', async () => { const message = AsyncTestUtil.randomString(12); await AsyncTestUtil.assertThrows( () => EthCrypto.encryptWithPublicKey( AsyncTestUtil.randomString(12), message ), 'Error' ); }); }); }); describe('.decryptWithPrivateKey()', () => { describe('positive', () => { it('should decrypt the data', async () => { const message = AsyncTestUtil.randomString(12); const encrypted = await EthCrypto.encryptWithPublicKey( TEST_DATA.publicKey, message ); const decrypted = await EthCrypto.decryptWithPrivateKey( TEST_DATA.privateKey, encrypted ); assert.equal(decrypted, message); }); it('should also decrypt with stringified data', async () => { const message = AsyncTestUtil.randomString(12); const encrypted = await EthCrypto.encryptWithPublicKey( TEST_DATA.publicKey, message ); const encryptedString 
= EthCrypto.cipher.stringify(encrypted); const decrypted = await EthCrypto.decryptWithPrivateKey( TEST_DATA.privateKey, encryptedString ); assert.equal(decrypted, message); }); }); describe('negative', () => {}); }); describe('.cipher', () => { describe('.stringify()', () => { it('should stringify the cipher', async () => { const ident = EthCrypto.createIdentity(); const message = AsyncTestUtil.randomString(12); const cipher = await EthCrypto.encryptWithPublicKey( ident.publicKey, message ); const str = EthCrypto.cipher.stringify(cipher); assert.equal(typeof str, 'string'); }); it('should not stringify the string', async () => { const ident = EthCrypto.createIdentity(); const message = AsyncTestUtil.randomString(12); const cipher = await EthCrypto.encryptWithPublicKey( ident.publicKey, message ); const str = EthCrypto.cipher.stringify(cipher); const str2 = EthCrypto.cipher.stringify(str);
            });
        });
        describe('.parse()', () => {
            it('should parse the equal object', async () => {
                const ident = EthCrypto.createIdentity();
                const message = AsyncTestUtil.randomString(12);
                const cipher = await EthCrypto.encryptWithPublicKey(
                    ident.publicKey,
                    message
                );
                const str = EthCrypto.cipher.stringify(cipher);
                const cipher2 = EthCrypto.cipher.parse(str);
                assert.deepEqual(cipher, cipher2);
            });
            it('should also work with different message-length', async () => {
                const ident = EthCrypto.createIdentity();
                const message = AsyncTestUtil.randomString(120);
                const cipher = await EthCrypto.encryptWithPublicKey(
                    ident.publicKey,
                    message
                );
                const str = EthCrypto.cipher.stringify(cipher);
                const cipher2 = EthCrypto.cipher.parse(str);
                assert.deepEqual(cipher, cipher2);
            });
        });
    });
    describe('.publicKey', () => {
        describe('.compress()', () => {
            it('should compress the key', () => {
                const uncompressed = 'a34d6aef3eb42335fb3cacb59478c0b44c0bbeb8bb4ca427dbc7044157a5d24b4adf14868d8449c9b3e50d3d6338f3e5a2d3445abe679cddbe75cb893475806f';
                const compressed = EthCrypto.publicKey.compress(uncompressed);
                assert.equal(typeof compressed, 'string');
                assert.ok(compressed.startsWith('03'));
            });
            it('should also work with a leading 04', () => {
                const uncompressed = '04a34d6aef3eb42335fb3cacb59478c0b44c0bbeb8bb4ca427dbc7044157a5d24b4adf14868d8449c9b3e50d3d6338f3e5a2d3445abe679cddbe75cb893475806f';
                const compressed = EthCrypto.publicKey.compress(uncompressed);
                assert.equal(typeof compressed, 'string');
                assert.ok(compressed.startsWith('03'));
            });
            it('should also work when compressed already given', () => {
                const uncompressed = '03a34d6aef3eb42335fb3cacb59478c0b44c0bbeb8bb4ca427dbc7044157a5d24b';
                const compressed = EthCrypto.publicKey.compress(uncompressed);
                assert.equal(typeof compressed, 'string');
                assert.ok(compressed.startsWith('03'));
            });
        });
        describe('.decompress()', () => {
            it('should decompress', () => {
                const compressed = '03a34d6aef3eb42335fb3cacb59478c0b44c0bbeb8bb4ca427dbc7044157a5d24b';
                const uncompressed = EthCrypto.publicKey.decompress(compressed);
                assert.equal(typeof uncompressed, 'string');
                const buf = Buffer.from(uncompressed, 'hex');
                assert.equal(buf.length, 64);
            });
            it('should work when already uncompressed', () => {
                const compressed = '04a34d6aef3eb42335fb3cacb59478c0b44c0bbeb8bb4ca427dbc7044157a5d24b4adf14868d8449c9b3e50d3d6338f3e5a2d3445abe679cddbe75cb893475806f';
                const uncompressed = EthCrypto.publicKey.decompress(compressed);
                assert.equal(typeof uncompressed, 'string');
                const buf = Buffer.from(uncompressed, 'hex');
                assert.equal(buf.length, 64);
            });
            it('should work when already uncompressed (no04)', () => {
                const compressed = 'a34d6aef3eb42335fb3cacb59478c0b44c0bbeb8bb4ca427dbc7044157a5d24b4adf14868d8449c9b3e50d3d6338f3e5a2d3445abe679cddbe75cb893475806f';
                const uncompressed = EthCrypto.publicKey.decompress(compressed);
                assert.equal(typeof uncompressed, 'string');
                const buf = Buffer.from(uncompressed, 'hex');
                assert.equal(buf.length, 64);
            });
        });
        describe('.toAddress()', () => {
            describe('positive', () => {
                it('should generate the correct address', () => {
                    const address = EthCrypto.publicKey.toAddress(TEST_DATA.publicKey);
                    assert.equal(address, TEST_DATA.address);
                });
                it('should work with compressed key', () => {
                    const ident = EthCrypto.createIdentity();
                    const compressed = EthCrypto.publicKey.compress(ident.publicKey);
                    const address = EthCrypto.publicKey.toAddress(compressed);
                    assert.equal(address, ident.address);
                });
            });
            describe('negative', () => {
                it('should throw when a non-key is given', () => {
                    assert.throws(
                        () => EthCrypto.publicKey.toAddress(
                            AsyncTestUtil.randomString(12)
                        )
                    );
                });
            });
        });
    });
    describe('.signTransaction()', () => {
        describe('positive', () => {
            it('should sign our transaction', () => {
                const ident = EthCrypto.createIdentity();
                const rawTx = {
                    from: ident.address,
                    to: '0x86Fa049857E0209aa7D9e616F7eb3b3B78ECfdb0',
                    value: 1000000000000000000,
                    gasPrice: 5000000000,
                    gasLimit: 21000
                };
                const signed = EthCrypto.signTransaction(
                    rawTx,
                    ident.privateKey
                );
                assert.equal(typeof signed, 'string');
            });
        });
        describe('negative', () => {
            it('should throw on non-key', () => {
                const ident = EthCrypto.createIdentity();
                const rawTx = {
                    from: ident.address,
                    to: '0x86Fa049857E0209aa7D9e616F7eb3b3B78ECfdb0',
                    value: 1000000000000000000,
                    gasPrice: 5000000000,
                    gasLimit: 21000
                };
                const ident2 = EthCrypto.createIdentity();
                assert.throws(
                    () => EthCrypto.signTransaction(
                        rawTx,
                        ident2.privateKey
                    )
                );
            });
        });
    });
    describe('hex', () => {
        it('compress/decompress to utf16', () => {
            const compressed = EthCrypto.hex.compress(HEX_STRING, false);
            assert.ok(compressed.length < HEX_STRING.length);
            const decompressed = EthCrypto.hex.decompress(compressed, false);
            assert.equal(decompressed, HEX_STRING);
        });
        it('compress/decompress to base64', () => {
            const compressed = EthCrypto.hex.compress(HEX_STRING, true);
            assert.ok(compressed.length < HEX_STRING.length);
            const decompressed = EthCrypto.hex.decompress(compressed, true);
            assert.equal(decompressed, HEX_STRING);
        });
    });
    /*
    describe('.testBlock()', ()=> {
        describe('positive', ()=> {});
        describe('negative', ()=> {});
    });
    */
});
assert.equal(str, str2);
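For orientation, a minimal end-to-end sketch of the API exercised by these
tests (same require path as this file; it uses only functions already called
in the suite above):

const EthCrypto = require('../dist/lib/index');

async function demo() {
    const alice = EthCrypto.createIdentity();
    const bob = EthCrypto.createIdentity();

    // Sign with Alice's key and recover her address from the signature.
    const messageHash = EthCrypto.hash.keccak256('hello');
    const signature = EthCrypto.sign(alice.privateKey, messageHash);
    console.log(EthCrypto.recover(signature, messageHash) === alice.address); // true

    // Encrypt to Bob's public key, decrypt with his private key.
    const encrypted = await EthCrypto.encryptWithPublicKey(bob.publicKey, 'secret');
    console.log(await EthCrypto.decryptWithPrivateKey(bob.privateKey, encrypted)); // 'secret'
}

demo();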
cluster_patch.go
// Copyright 2019 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package envoyfilter

import (
	cluster "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
	"github.com/gogo/protobuf/proto"

	networking "istio.io/api/networking/v1alpha3"
	"istio.io/pkg/log"

	"istio.io/istio/pilot/pkg/model"
	"istio.io/istio/pilot/pkg/util/runtime"
	"istio.io/istio/pkg/config/host"
)

// ApplyClusterPatches applies patches to CDS clusters
func ApplyClusterPatches(
	patchContext networking.EnvoyFilter_PatchContext,
	proxy *model.Proxy,
	push *model.PushContext,
	clusters []*cluster.Cluster) (out []*cluster.Cluster) {
	defer runtime.HandleCrash(func() {
		log.Errorf("clusters patch caused panic, so the patches did not take effect")
	})
	// If applying the patches panics, fall back to the clusters generated
	// before patching to limit the impact.
	out = clusters
	efw := push.EnvoyFilters(proxy)
	if efw == nil {
		return out
	}
	clustersRemoved := false
	for _, cp := range efw.Patches[networking.EnvoyFilter_CLUSTER] {
		if cp.Operation != networking.EnvoyFilter_Patch_REMOVE &&
			cp.Operation != networking.EnvoyFilter_Patch_MERGE
		for i := range clusters {
			if clusters[i] == nil {
				// deleted by the remove operation
				continue
			}

			if commonConditionMatch(patchContext, cp) && clusterMatch(clusters[i], cp) {
				if cp.Operation == networking.EnvoyFilter_Patch_REMOVE {
					clusters[i] = nil
					clustersRemoved = true
				} else {
					proto.Merge(clusters[i], cp.Value)
				}
			}
		}
	}

	// Add cluster if the operation is add, and patch context matches
	for _, cp := range efw.Patches[networking.EnvoyFilter_CLUSTER] {
		if cp.Operation == networking.EnvoyFilter_Patch_ADD {
			if commonConditionMatch(patchContext, cp) {
				clusters = append(clusters, proto.Clone(cp.Value).(*cluster.Cluster))
			}
		}
	}

	if clustersRemoved {
		trimmedClusters := make([]*cluster.Cluster, 0, len(clusters))
		for i := range clusters {
			if clusters[i] == nil {
				continue
			}
			trimmedClusters = append(trimmedClusters, clusters[i])
		}
		clusters = trimmedClusters
	}

	return clusters
}

func clusterMatch(cluster *cluster.Cluster, cp *model.EnvoyFilterConfigPatchWrapper) bool {
	cMatch := cp.Match.GetCluster()
	if cMatch == nil {
		return true
	}

	if cMatch.Name != "" {
		return cMatch.Name == cluster.Name
	}

	_, subset, hostname, port := model.ParseSubsetKey(cluster.Name)

	if cMatch.Subset != "" && cMatch.Subset != subset {
		return false
	}

	if cMatch.Service != "" && host.Name(cMatch.Service) != hostname {
		return false
	}

	// FIXME: Ports on a cluster can be 0. The API only takes uint32 for ports.
	// We should either make that field in the API a wrapper type or switch to int.
	if cMatch.PortNumber != 0 && int(cMatch.PortNumber) != port {
		return false
	}
	return true
}
{ continue }
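A standalone sketch (illustrative values, not Istio APIs) of the
remove-then-trim flow implemented above: REMOVE nils out matching entries in
place, and a final pass rebuilds the slice without them.

package main

import "fmt"

func main() {
	clusters := []string{"outbound|80||a", "outbound|80||b", "outbound|80||c"}

	// REMOVE: blank out matching entries (stands in for `clusters[i] = nil`).
	for i := range clusters {
		if clusters[i] == "outbound|80||b" {
			clusters[i] = ""
		}
	}

	// Trim: rebuild without removed entries, mirroring the trimmedClusters loop.
	trimmed := make([]string, 0, len(clusters))
	for _, c := range clusters {
		if c != "" {
			trimmed = append(trimmed, c)
		}
	}
	fmt.Println(trimmed) // [outbound|80||a outbound|80||c]
}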
anchor_generator.py
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ==============================================================================
"""Base anchor generator. The job of the anchor generator is to create (or load) a collection of bounding boxes to be used as anchors. Generated anchors are assumed to match some convolutional grid or list of grid shapes. For example, we might want to generate anchors matching an 8x8 feature map and a 4x4 feature map. If we place 3 anchors per grid location on the first feature map and 6 anchors per grid location on the second feature map, then 3*8*8 + 6*4*4 = 288 anchors are generated in total. To support fully convolutional settings, feature map shapes are passed dynamically at generation time. The number of anchors to place at each location is static --- implementations of AnchorGenerator must always be able return the number of anchors that it uses per location for each feature map. """ from abc import ABCMeta from abc import abstractmethod import tensorflow as tf class AnchorGenerator(object): """Abstract base class for anchor generators.""" __metaclass__ = ABCMeta @abstractmethod def name_scope(self): """Name scope. Must be defined by implementations. Returns: a string representing the name scope of the anchor generation operation. """ pass @property def check_num_anchors(self): """Whether to dynamically check the number of anchors generated. Can be overridden by implementations that would like to disable this behavior. Returns: a boolean controlling whether the Generate function should dynamically check the number of anchors generated against the mathematically expected number of anchors. """ return True @abstractmethod def num_anchors_per_location(self): """Returns the number of anchors per spatial location. Returns: a list of integers, one for each expected feature map to be passed to the `generate` function. """ pass def generate(self, feature_map_shape_list, **params): """Generates a collection of bounding boxes to be used as anchors. TODO(rathodv): remove **params from argument list and make stride and offsets (for multiple_grid_anchor_generator) constructor arguments. Args: feature_map_shape_list: list of (height, width) pairs in the format [(height_0, width_0), (height_1, width_1), ...] that the generated anchors must align with. Pairs can be provided as 1-dimensional integer tensors of length 2 or simply as tuples of integers. **params: parameters for anchor generation op Returns: boxes_list: a list of BoxLists each holding anchor boxes corresponding to the input feature map shapes. Raises: ValueError: if the number of feature map shapes does not match the length of NumAnchorsPerLocation. """ if self.check_num_anchors and ( len(feature_map_shape_list) != len(self.num_anchors_per_location())): raise ValueError('Number of feature maps is expected to equal the length ' 'of `num_anchors_per_location`.') with tf.name_scope(self.name_scope()): anchors_list = self._generate(feature_map_shape_list, **params) if self.check_num_anchors: with tf.control_dependencies([ self._assert_correct_number_of_anchors( anchors_list, feature_map_shape_list)]): for item in anchors_list: item.set(tf.identity(item.get())) return anchors_list @abstractmethod def _generate(self, feature_map_shape_list, **params): """To be overridden by implementations. Args: feature_map_shape_list: list of (height, width) pairs in the format [(height_0, width_0), (height_1, width_1), ...] that the generated anchors must align with. **params: parameters for anchor generation op Returns: boxes_list: a list of BoxList, each holding a collection of N anchor boxes. 
""" pass def _assert_correct_number_of_anchors(self, anchors_list, feature_map_shape_list): """Assert that correct number of anchors was generated. Args: anchors_list: A list of box_list.BoxList object holding anchors generated. feature_map_shape_list: list of (height, width) pairs in the format [(height_0, width_0), (height_1, width_1), ...] that the generated anchors must align with. Returns: Op that raises InvalidArgumentError if the number of anchors does not match the number of expected anchors. """ expected_num_anchors = 0 actual_num_anchors = 0 for num_anchors_per_location, feature_map_shape, anchors in zip( self.num_anchors_per_location(), feature_map_shape_list, anchors_list): expected_num_anchors += (num_anchors_per_location * feature_map_shape[0] * feature_map_shape[1]) actual_num_anchors += anchors.num_boxes() return tf.assert_equal(expected_num_anchors, actual_num_anchors)
proteusmirabilis.py
""" This file offers the methods to automatically retrieve the graph Proteus mirabilis. The graph is automatically retrieved from the STRING repository. Report --------------------- At the time of rendering these methods (please see datetime below), the graph had the following characteristics: Datetime: 2021-02-02 21:06:19.073950 The undirected graph Proteus mirabilis has 3626 nodes and 280355 weighted edges, of which none are self-loops. The graph is dense as it has a density of 0.04266 and has 14 connected components, where the component with most nodes has 3592 nodes and the component with the least nodes has 2 nodes. The graph median node degree is 128, the mean node degree is 154.64, and the node degree mode is 1. The top 5 most central nodes are 529507.PMI2600 (degree 1405), 529507.PMI2826 (degree 1179), 529507.PMI1545 (degree 1018), 529507.PMI3678 (degree 983) and 529507.PMI2101 (degree 965). References --------------------- Please cite the following if you use the data: @article{szklarczyk2019string, title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets}, author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others}, journal={Nucleic acids research}, volume={47}, number={D1}, pages={D607--D613}, year={2019}, publisher={Oxford University Press} } Usage example ---------------------- The usage of this graph is relatively straightforward: .. code:: python # First import the function to retrieve the graph from the datasets from ensmallen_graph.datasets.string import ProteusMirabilis # Then load the graph graph = ProteusMirabilis() # Finally, you can do anything with it, for instance, compute its report: print(graph) # If you need to run a link prediction task with validation, # you can split the graph using a connected holdout as follows: train_graph, validation_graph = graph.connected_holdout( # You can use an 80/20 split the holdout, for example. train_size=0.8, # The random state is used to reproduce the holdout. random_state=42, # Wether to show a loading bar. verbose=True ) # Remember that, if you need, you can enable the memory-time trade-offs: train_graph.enable( vector_sources=True, vector_destinations=True, vector_outbounds=True ) # Consider using the methods made available in the Embiggen package # to run graph embedding or link prediction tasks. """ from typing import Dict from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error def ProteusMirabilis( directed: bool = False, verbose: int = 2, cache_path: str = "graphs/string", **additional_graph_kwargs: Dict ) -> EnsmallenGraph:
"""Return new instance of the Proteus mirabilis graph. The graph is automatically retrieved from the STRING repository. Parameters ------------------- directed: bool = False, Wether to load the graph as directed or undirected. By default false. verbose: int = 2, Wether to show loading bars during the retrieval and building of the graph. cache_path: str = "graphs", Where to store the downloaded graphs. additional_graph_kwargs: Dict, Additional graph kwargs. Returns ----------------------- Instace of Proteus mirabilis graph. Report --------------------- At the time of rendering these methods (please see datetime below), the graph had the following characteristics: Datetime: 2021-02-02 21:06:19.073950 The undirected graph Proteus mirabilis has 3626 nodes and 280355 weighted edges, of which none are self-loops. The graph is dense as it has a density of 0.04266 and has 14 connected components, where the component with most nodes has 3592 nodes and the component with the least nodes has 2 nodes. The graph median node degree is 128, the mean node degree is 154.64, and the node degree mode is 1. The top 5 most central nodes are 529507.PMI2600 (degree 1405), 529507.PMI2826 (degree 1179), 529507.PMI1545 (degree 1018), 529507.PMI3678 (degree 983) and 529507.PMI2101 (degree 965). References --------------------- Please cite the following if you use the data: @article{szklarczyk2019string, title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets}, author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others}, journal={Nucleic acids research}, volume={47}, number={D1}, pages={D607--D613}, year={2019}, publisher={Oxford University Press} } Usage example ---------------------- The usage of this graph is relatively straightforward: .. code:: python # First import the function to retrieve the graph from the datasets from ensmallen_graph.datasets.string import ProteusMirabilis # Then load the graph graph = ProteusMirabilis() # Finally, you can do anything with it, for instance, compute its report: print(graph) # If you need to run a link prediction task with validation, # you can split the graph using a connected holdout as follows: train_graph, validation_graph = graph.connected_holdout( # You can use an 80/20 split the holdout, for example. train_size=0.8, # The random state is used to reproduce the holdout. random_state=42, # Wether to show a loading bar. verbose=True ) # Remember that, if you need, you can enable the memory-time trade-offs: train_graph.enable( vector_sources=True, vector_destinations=True, vector_outbounds=True ) # Consider using the methods made available in the Embiggen package # to run graph embedding or link prediction tasks. """ return AutomaticallyRetrievedGraph( graph_name="ProteusMirabilis", dataset="string", directed=directed, verbose=verbose, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs )()
0001_initial.py
# Generated by Django 2.0.7 on 2018-07-20 17:11 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class
(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='CompletedTopics', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ], ), migrations.CreateModel( name='Topic', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=100, unique=True)), ('description', models.TextField()), ('created_at', models.DateTimeField(auto_now_add=True)), ('updated_at', models.DateTimeField(auto_now=True)), ('slug', models.SlugField(blank=True, default='djangodbmodelsfieldscharfield')), ], ), migrations.CreateModel( name='Track', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=100, unique=True)), ('description', models.TextField()), ('number_of_topics', models.PositiveSmallIntegerField(blank=True, default=0)), ('created_at', models.DateTimeField(auto_now_add=True)), ('slug', models.SlugField(blank=True, default='djangodbmodelsfieldscharfield')), ], ), migrations.AddField( model_name='topic', name='track', field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='tracks.Track'), ), migrations.AddField( model_name='completedtopics', name='topic', field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='tracks.Topic'), ), migrations.AddField( model_name='completedtopics', name='user', field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), ]
Migration
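# Illustrative sketch (not part of the original file): the models.py this
# migration implies, reconstructed from the operations above. Field options
# are inferred from the migration and may differ from the project's actual
# models.
from django.conf import settings
from django.db import models


class Track(models.Model):
    title = models.CharField(max_length=100, unique=True)
    description = models.TextField()
    number_of_topics = models.PositiveSmallIntegerField(blank=True, default=0)
    created_at = models.DateTimeField(auto_now_add=True)
    slug = models.SlugField(blank=True, default='djangodbmodelsfieldscharfield')


class Topic(models.Model):
    track = models.OneToOneField(Track, on_delete=models.CASCADE)
    title = models.CharField(max_length=100, unique=True)
    description = models.TextField()
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    slug = models.SlugField(blank=True, default='djangodbmodelsfieldscharfield')


class CompletedTopics(models.Model):
    topic = models.OneToOneField(Topic, on_delete=models.CASCADE)
    user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)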
extract_features.py
import pandas as pd
import numpy as np
from utils import *
from sklearn.preprocessing import StandardScaler
from collections import defaultdict
import re
# `reduce` and `itertools` are used below; import them explicitly in case
# `utils` does not re-export them.
import itertools
from functools import reduce


def format_labels(file_path, timelines, mapping):
    most_recent = mapping.sort_values(["subject_id", "ordering_date"], ascending=False).drop_duplicates("subject_id", keep="first")
    label_features = pd.read_csv(file_path)
    formatted_features = reformat4pycox(["report_id"], label_features)

    #Connect subject to report
    data_frames = [timelines, most_recent]
    data_df = reduce(lambda left,right: pd.merge(left,right,on="subject_id"), data_frames)

    #Connect report to labels
    data_frames = [data_df, formatted_features]
    data_df = reduce(lambda left,right: pd.merge(left,right,on="report_id"), data_frames)

    for i in ["ordering_date", "report_id"]:
        del data_df[i]

    return data_df


def format_hidden_features(file_path, timelines, mapping):
    loaded = np.load(file_path)
    most_recent = mapping.sort_values(["subject_id", "ordering_date"], ascending=False).drop_duplicates("subject_id", keep="first")
    report_ids = list(most_recent['report_id'])

    # Avoid shadowing the built-in `id` while flattening each report embedding.
    mutable_file = {}
    for report_id in report_ids:
        mutable_file[report_id] = loaded[report_id].flatten()
    loaded = mutable_file

    label_features = pd.DataFrame(loaded.values(), index=loaded)
    cols = list(label_features.columns)
    xcols = ["x" + str(i) for i in cols]
    rename_dict = dict(zip(cols,xcols))
    rename_dict["index"] = "report_id"
    label_features = label_features.reset_index().rename(columns=rename_dict)

    #Connect subject to report
    data_frames = [timelines, most_recent]
    data_df = reduce(lambda left,right: pd.merge(left,right,on="subject_id"), data_frames)

    #Connect report to labels
    data_frames = [data_df, label_features]
    data_df = reduce(lambda left,right: pd.merge(left,right,on="report_id"), data_frames)

    for i in ["ordering_date", "report_id"]:
        del data_df[i]

    return data_df


def format_hf_sequence(file_path, timelines, mapping):
    loaded = np.load(file_path)
    top3_reports = mapping.sort_values(["subject_id", "ordering_date"], ascending=True).groupby("subject_id").tail(3)

    #Create a list of report ids
    report_dict = top3_reports.groupby("subject_id")["report_id"].apply(list).to_dict()

    #Create a dict of report arrays. Format: key: array of report embeddings
    embedding_dict = defaultdict(list)
    for k,v in report_dict.items():
        for vi in v:
            embedding_dict[k].append(loaded[vi])
        embedding_dict[k] = np.vstack(embedding_dict[k])

    #Converting embedding dict into dataframe
    label_features = pd.DataFrame(embedding_dict.values(), index=embedding_dict)
    label_features[0] = label_features[0].apply(lambda x: add_paddings(x))
merged = list(itertools.chain(*list2d)) scaler = StandardScaler() scaler.fit(merged) label_features[0] = label_features[0].apply(lambda x: scaler.transform(x)) cols = list(label_features.columns) xcols = ["x" + str(i) for i in cols] rename_dict = dict(zip(cols,xcols)) label_features = label_features.rename(columns=rename_dict) label_features = label_features.reset_index().rename(columns={"index": "subject_id"}) data_frames = [timelines, label_features] data_df = reduce(lambda left,right: pd.merge(left,right,on="subject_id"), data_frames) return data_df
list2d = label_features[0]
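# Illustrative sketch (not part of the original file): wiring the helpers
# together. The file paths and the `timelines`/`mapping` CSVs are hypothetical;
# `mapping` is assumed to carry subject_id, report_id and ordering_date columns.
if __name__ == "__main__":
    timelines = pd.read_csv("timelines.csv")
    mapping = pd.read_csv("report_mapping.csv")
    data_df = format_labels("label_features.csv", timelines, mapping)
    print(data_df.head())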
color.go
// Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package color implements a basic color library. package color // Color can convert itself to alpha-premultiplied 16-bits per channel RGBA. // The conversion may be lossy. type Color interface { // RGBA returns the alpha-premultiplied red, green, blue and alpha values // for the color. Each value ranges within [0, 0xffff], but is represented // by a uint32 so that multiplying by a blend factor up to 0xffff will not // overflow. // // An alpha-premultiplied color component c has been scaled by alpha (a), // so has valid values 0 <= c <= a. RGBA() (r, g, b, a uint32) } // RGBA represents a traditional 32-bit alpha-premultiplied color, having 8 // bits for each of red, green, blue and alpha. // // An alpha-premultiplied color component C has been scaled by alpha (A), so // has valid values 0 <= C <= A. type RGBA struct { R, G, B, A uint8 } func (c RGBA) RGBA() (r, g, b, a uint32) { r = uint32(c.R) r |= r << 8 g = uint32(c.G) g |= g << 8 b = uint32(c.B) b |= b << 8 a = uint32(c.A) a |= a << 8 return } // RGBA64 represents a 64-bit alpha-premultiplied color, having 16 bits for // each of red, green, blue and alpha. // // An alpha-premultiplied color component C has been scaled by alpha (A), so // has valid values 0 <= C <= A. type RGBA64 struct { R, G, B, A uint16 } func (c RGBA64) RGBA() (r, g, b, a uint32) { return uint32(c.R), uint32(c.G), uint32(c.B), uint32(c.A) } // NRGBA represents a non-alpha-premultiplied 32-bit color. type NRGBA struct { R, G, B, A uint8 } func (c NRGBA) RGBA() (r, g, b, a uint32) { r = uint32(c.R) r |= r << 8 r *= uint32(c.A) r /= 0xff g = uint32(c.G) g |= g << 8 g *= uint32(c.A) g /= 0xff b = uint32(c.B) b |= b << 8 b *= uint32(c.A) b /= 0xff a = uint32(c.A) a |= a << 8 return } // NRGBA64 represents a non-alpha-premultiplied 64-bit color, // having 16 bits for each of red, green, blue and alpha. type NRGBA64 struct { R, G, B, A uint16 } func (c NRGBA64) RGBA() (r, g, b, a uint32) { r = uint32(c.R) r *= uint32(c.A) r /= 0xffff g = uint32(c.G) g *= uint32(c.A) g /= 0xffff b = uint32(c.B) b *= uint32(c.A) b /= 0xffff a = uint32(c.A) return } // Alpha represents an 8-bit alpha color. type Alpha struct { A uint8 } func (c Alpha) RGBA() (r, g, b, a uint32) { a = uint32(c.A) a |= a << 8 return a, a, a, a } // Alpha16 represents a 16-bit alpha color. type Alpha16 struct { A uint16 } func (c Alpha16) RGBA() (r, g, b, a uint32) { a = uint32(c.A) return a, a, a, a } // Gray represents an 8-bit grayscale color. type Gray struct { Y uint8 } func (c Gray) RGBA() (r, g, b, a uint32) { y := uint32(c.Y) y |= y << 8 return y, y, y, 0xffff } // Gray16 represents a 16-bit grayscale color. type Gray16 struct { Y uint16 } func (c Gray16) RGBA() (r, g, b, a uint32) { y := uint32(c.Y) return y, y, y, 0xffff } // Model can convert any Color to one from its own color model. The conversion // may be lossy. type Model interface { Convert(c Color) Color } // ModelFunc returns a Model that invokes f to implement the conversion. func ModelFunc(f func(Color) Color) Model { // Note: using *modelFunc as the implementation // means that callers can still use comparisons // like m == RGBAModel. This is not possible if // we use the func value directly, because funcs // are no longer comparable. 
return &modelFunc{f} } type modelFunc struct { f func(Color) Color } func (m *modelFunc) Convert(c Color) Color { return m.f(c) } // Models for the standard color types. var ( RGBAModel Model = ModelFunc(rgbaModel) RGBA64Model Model = ModelFunc(rgba64Model) NRGBAModel Model = ModelFunc(nrgbaModel) NRGBA64Model Model = ModelFunc(nrgba64Model) AlphaModel Model = ModelFunc(alphaModel) Alpha16Model Model = ModelFunc(alpha16Model) GrayModel Model = ModelFunc(grayModel) Gray16Model Model = ModelFunc(gray16Model) ) func rgbaModel(c Color) Color { if _, ok := c.(RGBA); ok { return c } r, g, b, a := c.RGBA() return RGBA{uint8(r >> 8), uint8(g >> 8), uint8(b >> 8), uint8(a >> 8)} } func rgba64Model(c Color) Color { if _, ok := c.(RGBA64); ok { return c } r, g, b, a := c.RGBA() return RGBA64{uint16(r), uint16(g), uint16(b), uint16(a)} } func nrgbaModel(c Color) Color { if _, ok := c.(NRGBA); ok { return c } r, g, b, a := c.RGBA() if a == 0xffff { return NRGBA{uint8(r >> 8), uint8(g >> 8), uint8(b >> 8), 0xff} } if a == 0 { return NRGBA{0, 0, 0, 0} } // Since Color.RGBA returns an alpha-premultiplied color, we should have r <= a && g <= a && b <= a. r = (r * 0xffff) / a g = (g * 0xffff) / a b = (b * 0xffff) / a return NRGBA{uint8(r >> 8), uint8(g >> 8), uint8(b >> 8), uint8(a >> 8)} } func nrgba64Model(c Color) Color { if _, ok := c.(NRGBA64); ok { return c } r, g, b, a := c.RGBA() if a == 0xffff { return NRGBA64{uint16(r), uint16(g), uint16(b), 0xffff} } if a == 0 { return NRGBA64{0, 0, 0, 0} } // Since Color.RGBA returns an alpha-premultiplied color, we should have r <= a && g <= a && b <= a. r = (r * 0xffff) / a g = (g * 0xffff) / a b = (b * 0xffff) / a return NRGBA64{uint16(r), uint16(g), uint16(b), uint16(a)} } func
(c Color) Color { if _, ok := c.(Alpha); ok { return c } _, _, _, a := c.RGBA() return Alpha{uint8(a >> 8)} } func alpha16Model(c Color) Color { if _, ok := c.(Alpha16); ok { return c } _, _, _, a := c.RGBA() return Alpha16{uint16(a)} } func grayModel(c Color) Color { if _, ok := c.(Gray); ok { return c } r, g, b, _ := c.RGBA() // These coefficients (the fractions 0.299, 0.587 and 0.114) are the same // as those given by the JFIF specification and used by func RGBToYCbCr in // ycbcr.go. // // Note that 19595 + 38470 + 7471 equals 65536. // // The 24 is 16 + 8. The 16 is the same as used in RGBToYCbCr. The 8 is // because the return value is 8 bit color, not 16 bit color. y := (19595*r + 38470*g + 7471*b + 1<<15) >> 24 return Gray{uint8(y)} } func gray16Model(c Color) Color { if _, ok := c.(Gray16); ok { return c } r, g, b, _ := c.RGBA() // These coefficients (the fractions 0.299, 0.587 and 0.114) are the same // as those given by the JFIF specification and used by func RGBToYCbCr in // ycbcr.go. // // Note that 19595 + 38470 + 7471 equals 65536. y := (19595*r + 38470*g + 7471*b + 1<<15) >> 16 return Gray16{uint16(y)} } // Palette is a palette of colors. type Palette []Color // Convert returns the palette color closest to c in Euclidean R,G,B space. func (p Palette) Convert(c Color) Color { if len(p) == 0 { return nil } return p[p.Index(c)] } // Index returns the index of the palette color closest to c in Euclidean // R,G,B,A space. func (p Palette) Index(c Color) int { // A batch version of this computation is in image/draw/draw.go. cr, cg, cb, ca := c.RGBA() ret, bestSum := 0, uint32(1<<32-1) for i, v := range p { vr, vg, vb, va := v.RGBA() sum := sqDiff(cr, vr) + sqDiff(cg, vg) + sqDiff(cb, vb) + sqDiff(ca, va) if sum < bestSum { if sum == 0 { return i } ret, bestSum = i, sum } } return ret } // sqDiff returns the squared-difference of x and y, shifted by 2 so that // adding four of those won't overflow a uint32. // // x and y are both assumed to be in the range [0, 0xffff]. func sqDiff(x, y uint32) uint32 { var d uint32 if x > y { d = x - y } else { d = y - x } return (d * d) >> 2 } // Standard colors. var ( Black = Gray16{0} White = Gray16{0xffff} Transparent = Alpha16{0} Opaque = Alpha16{0xffff} )
alphaModel
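// Illustrative sketch (not part of the original file): converting a color to
// grayscale with the models defined above.
func exampleGrayConversion() {
	c := NRGBA{R: 0xff, G: 0x80, B: 0x00, A: 0xff} // opaque orange
	g := GrayModel.Convert(c).(Gray)
	_ = g.Y // luma computed with the JFIF coefficients documented in grayModel
}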
openMetrics.js
import React from 'react';
import PropTypes from 'prop-types';
import styled from 'styled-components';
import theme from '../../styles/theme';
import closeIcon from '../../icons/close.svg';

// The modal "window"
const StyledOpenMetric = styled.div`
  display: flex;
  justify-content: center;
  align-items: center;
  background-color: ${theme.template};
  border-radius: 5px;
  margin: 25px auto;
  padding: 30px;
  min-width: 200px;
  max-width: 150px;
  min-height: 200px;
`;

class
extends React.Component { render() { return (<StyledOpenMetric> {this.props.text} </StyledOpenMetric>); } } OpenMetrics.propTypes = { text: PropTypes.string.isRequired }; export default OpenMetrics;
OpenMetrics
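// Illustrative usage sketch (not part of the original file); the metric text
// shown is a hypothetical example.
const ExampleUsage = () => <OpenMetrics text="Requests per second: 1200" />;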
kendo.culture.dv-MV.min.js
/** * Kendo UI v2016.1.226 (http://www.telerik.com/kendo-ui) * Copyright 2016 Telerik AD. All rights reserved. * * Kendo UI commercial licenses may be obtained at * http://www.telerik.com/purchase/license-agreement/kendo-ui-complete * If you do not own a commercial license, this file shall be governed by the trial license terms.
*/ !function(d){"function"==typeof define&&define.amd?define(["kendo.core.min"],d):d()}(function(){!function(d,M){kendo.cultures["dv-MV"]={name:"dv-MV",numberFormat:{pattern:["-n"],decimals:2,",":",",".":".",groupSize:[3],percent:{pattern:["-n %","n %"],decimals:2,",":",",".":".",groupSize:[3],symbol:"%"},currency:{name:"Rufiyaa",abbr:"MVR",pattern:["n $-","n $"],decimals:2,",":",",".":".",groupSize:[3],symbol:"ރ."}},calendars:{standard:{days:{names:["އާދީއްތަ","ހޯމަ","އަންގާރަ","ބުދަ","ބުރާސްފަތި","ހުކުރު","ހޮނިހިރު"],namesAbbr:["އާދީއްތަ","ހޯމަ","އަންގާރަ","ބުދަ","ބުރާސްފަތި","ހުކުރު","ހޮނިހިރު"],namesShort:["އާ","ހޯ","އަ","ބު","ބު","ހު","ހޮ"]},months:{names:["ޖަނަވަރީ","ފެބްރުއަރީ","މާރޗް","އޭޕްރިލް","މެއި","ޖޫން","ޖުލައި","އޮގަސްޓް","ސެޕްޓެމްބަރ","އޮކްޓޯބަރ","ނޮވެމްބަރ","ޑިސެމްބަރ"],namesAbbr:["ޖަނަވަރީ","ފެބްރުއަރީ","މާރޗް","އޭޕްރިލް","މެއި","ޖޫން","ޖުލައި","އޮގަސްޓް","ސެޕްޓެމްބަރ","އޮކްޓޯބަރ","ނޮވެމްބަރ","ޑިސެމްބަރ"]},AM:["މކ","މކ","މކ"],PM:["މފ","މފ","މފ"],patterns:{d:"dd/MM/yy",D:"ddd, yyyy MMMM dd",F:"ddd, yyyy MMMM dd HH:mm:ss",g:"dd/MM/yy HH:mm",G:"dd/MM/yy HH:mm:ss",m:"MMMM dd",M:"MMMM dd",s:"yyyy'-'MM'-'dd'T'HH':'mm':'ss",t:"HH:mm",T:"HH:mm:ss",u:"yyyy'-'MM'-'dd HH':'mm':'ss'Z'",y:"yyyy, MMMM",Y:"yyyy, MMMM"},"/":"/",":":":",firstDay:0}}}}(this)}); //# sourceMappingURL=kendo.culture.dv-MV.min.js.map
getRouteFilter.go
// *** WARNING: this file was generated by the Pulumi SDK Generator. *** // *** Do not edit by hand unless you're certain you know what you are doing! *** package v20200501 import ( "github.com/pulumi/pulumi/sdk/v2/go/pulumi" ) // Route Filter Resource. func LookupRouteFilter(ctx *pulumi.Context, args *LookupRouteFilterArgs, opts ...pulumi.InvokeOption) (*LookupRouteFilterResult, error) { var rv LookupRouteFilterResult err := ctx.Invoke("azure-nextgen:network/v20200501:getRouteFilter", args, &rv, opts...) if err != nil
return &rv, nil } type LookupRouteFilterArgs struct { // Expands referenced express route bgp peering resources. Expand *string `pulumi:"expand"` // The name of the resource group. ResourceGroupName string `pulumi:"resourceGroupName"` // The name of the route filter. RouteFilterName string `pulumi:"routeFilterName"` } // Route Filter Resource. type LookupRouteFilterResult struct { // A unique read-only string that changes whenever the resource is updated. Etag string `pulumi:"etag"` // Resource ID. Id *string `pulumi:"id"` // A collection of references to express route circuit ipv6 peerings. Ipv6Peerings []ExpressRouteCircuitPeeringResponse `pulumi:"ipv6Peerings"` // Resource location. Location string `pulumi:"location"` // Resource name. Name string `pulumi:"name"` // A collection of references to express route circuit peerings. Peerings []ExpressRouteCircuitPeeringResponse `pulumi:"peerings"` // The provisioning state of the route filter resource. ProvisioningState string `pulumi:"provisioningState"` // Collection of RouteFilterRules contained within a route filter. Rules []RouteFilterRuleResponse `pulumi:"rules"` // Resource tags. Tags map[string]string `pulumi:"tags"` // Resource type. Type string `pulumi:"type"` }
{ return nil, err }
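// Illustrative sketch (not part of the original file): calling the lookup from
// a Pulumi program; the resource group and route filter names are hypothetical.
func exampleLookup(ctx *pulumi.Context) error {
	rf, err := LookupRouteFilter(ctx, &LookupRouteFilterArgs{
		ResourceGroupName: "example-rg",
		RouteFilterName:   "example-route-filter",
	})
	if err != nil {
		return err
	}
	ctx.Export("routeFilterEtag", pulumi.String(rf.Etag))
	return nil
}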
metadata.js
import _ from 'underscore'; import cloneDeep from 'lodash/cloneDeep'; /** * Returns the metadata of the state with the new empty field. If the field does * not exist, returns the original metadata. Does not mutate the given state. * @param {Object} state * @param {String} namePrefix * @return {Object} metadata */ export const addField = (state, namePrefix) => { let tmpState = cloneDeep(state); let field = eval(`tmpState.${namePrefix}`); if (field === undefined) return tmpState.metadata; if (_.isArray(field)) field.push(''); else field['New field ' + state.new_field_count] = ''; return tmpState.metadata; }; /** * Returns the metadata of the state with the removed key. If the field does not * exist, returns the original metadata. Does not mutate the given state. * @param {Object} state * @param {String} namePrefix * @param {String} key * @return {Object} metadata */ export const removeField = (state, namePrefix, key) => { let tmpState = cloneDeep(state); let field = eval(`tmpState.${namePrefix}`); if (field === undefined) return tmpState.metadata; if (_.isArray(field)) { if (key >= field.length) { return tmpState.metadata; } field.splice(key, 1); } else { if (!_.has(field, key)) { return tmpState.metadata; } delete field[key]; } return tmpState.metadata; }; /** * Returns the metadata of the state with the updated key. If the field does not * exist or the key already exists, returns the original metadata. Does not * mutate the given state. * @param {Object} state * @param {String} namePrefix * @param {String} fieldKey * @param {String} newKey * @return {Object} metadata */ export const updateFieldKey = (state, namePrefix, fieldKey, newKey) => { let tmpState = cloneDeep(state); let field = eval(`tmpState.${namePrefix}`); if (field === undefined) return tmpState.metadata; if (_.has(field, newKey)) return tmpState.metadata; field = Object.keys(field) .reduce((result, current) => { if (current == fieldKey) result[newKey] = field[current]; else result[current] = field[current]; return result; }, {}); eval(`tmpState.${namePrefix} = field`); return tmpState.metadata; }; /** * Returns the metadata of the state with the updated value of given path(nameAttr). * If the field does not exist, creates a new field. Does not mutate the given state. * @param {Object} state * @param {String} nameAttr * @param {String} value * @return {Object} metadata */ export const updateFieldValue = (state, nameAttr, value) => { let tmpState = cloneDeep(state); eval(`tmpState.${nameAttr} = value`); return tmpState.metadata; }; /** * Returns the metadata of the state with the converted type of given path(nameAttr). * If the field does not exist, returns the original metadata. * Does not mutate the given state. * @param {Object} state * @param {String} nameAttr * @param {String} convertType * @return {Object} metadata */ export const convertField = (state, nameAttr, convertType) => { let tmpState = cloneDeep(state); let field = eval(`tmpState.${nameAttr}`); if (field === undefined) return tmpState.metadata; if (convertType == 'array') field = ['']; else if (convertType == 'object') { let key = 'New field ' + state.new_field_count; field = { [key]: '' };
}; /** * Returns the metadata of the state with the sorted array. Moves the array item to * target index, shifts the rest of them. If the given path is not an array, * returns the original metadata. Does not mutate the given state. * @param {Object} state * @param {String} namePrefix * @param {Number} srcInd * @param {Number} targetInd * @return {Object} metadata */ export const moveArrayItem = (state, namePrefix, srcInd, targetInd) => { let tmpState = cloneDeep(state); let arr = eval(`tmpState.${namePrefix}`); if (!_.isArray(arr)) return tmpState.metadata; arr.splice(targetInd, 0, arr.splice(srcInd, 1)[0]); return tmpState.metadata; };
} else field = ''; eval(`tmpState.${nameAttr} = field`); return tmpState.metadata;
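// Illustrative sketch (not part of the original file): exercising the helpers
// above on a hypothetical state shape with `metadata` and `new_field_count`.
const state = { metadata: { title: 'Post', tags: ['jekyll'] }, new_field_count: 0 };
const m1 = addField(state, 'metadata');                       // adds "New field 0": ''
const m2 = updateFieldValue(state, 'metadata.title', 'Draft'); // sets title
const m3 = moveArrayItem(state, 'metadata.tags', 0, 0);        // no-op reorder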
web-handlers.go
/* * MinIO Cloud Storage, (C) 2016-2019 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package cmd import ( "context" "crypto/subtle" "encoding/json" "errors" "fmt" "io" "net/http" "net/url" "os" "path" "reflect" "runtime" "strconv" "strings" "time" "github.com/gorilla/mux" "github.com/klauspost/compress/zip" "github.com/minio/minio-go/v7" miniogo "github.com/minio/minio-go/v7" miniogopolicy "github.com/minio/minio-go/v7/pkg/policy" "github.com/minio/minio-go/v7/pkg/s3utils" "github.com/minio/minio/cmd/config/dns" "github.com/minio/minio/cmd/config/identity/openid" "github.com/minio/minio/cmd/crypto" xhttp "github.com/minio/minio/cmd/http" "github.com/minio/minio/cmd/logger" "github.com/minio/minio/pkg/auth" "github.com/minio/minio/pkg/bucket/lifecycle" objectlock "github.com/minio/minio/pkg/bucket/object/lock" "github.com/minio/minio/pkg/bucket/policy" "github.com/minio/minio/pkg/bucket/replication" "github.com/minio/minio/pkg/etag" "github.com/minio/minio/pkg/event" "github.com/minio/minio/pkg/handlers" "github.com/minio/minio/pkg/hash" iampolicy "github.com/minio/minio/pkg/iam/policy" "github.com/minio/minio/pkg/ioutil" "github.com/minio/minio/pkg/rpc/json2" ) func extractBucketObject(args reflect.Value) (bucketName, objectName string) { switch args.Kind() { case reflect.Ptr: a := args.Elem() for i := 0; i < a.NumField(); i++ { switch a.Type().Field(i).Name { case "BucketName": bucketName = a.Field(i).String() case "Prefix": objectName = a.Field(i).String() case "ObjectName": objectName = a.Field(i).String() } } } return bucketName, objectName } // WebGenericArgs - empty struct for calls that don't accept arguments // for ex. ServerInfo type WebGenericArgs struct{} // WebGenericRep - reply structure for calls for which reply is success/failure // for ex. RemoveObject MakeBucket type WebGenericRep struct { UIVersion string `json:"uiVersion"` } // ServerInfoRep - server info reply. type ServerInfoRep struct { MinioVersion string MinioMemory string MinioPlatform string MinioRuntime string MinioGlobalInfo map[string]interface{} MinioUserInfo map[string]interface{} UIVersion string `json:"uiVersion"` } // ServerInfo - get server info. func (web *webAPIHandlers) ServerInfo(r *http.Request, args *WebGenericArgs, reply *ServerInfoRep) error { ctx := newWebContext(r, args, "WebServerInfo") claims, owner, authErr := webRequestAuthenticate(r) if authErr != nil { return toJSONError(ctx, authErr) } host, err := os.Hostname() if err != nil { host = "" } platform := fmt.Sprintf("Host: %s | OS: %s | Arch: %s", host, runtime.GOOS, runtime.GOARCH) goruntime := fmt.Sprintf("Version: %s | CPUs: %d", runtime.Version(), runtime.NumCPU()) reply.MinioVersion = Version reply.MinioGlobalInfo = getGlobalInfo() // Check if the user is IAM user. 
reply.MinioUserInfo = map[string]interface{}{ "isIAMUser": !owner, } if !owner { creds, ok := globalIAMSys.GetUser(claims.AccessKey) if ok && creds.SessionToken != "" { reply.MinioUserInfo["isTempUser"] = true } } reply.MinioPlatform = platform reply.MinioRuntime = goruntime reply.UIVersion = Version return nil } // StorageInfoRep - contains storage usage statistics. type StorageInfoRep struct { Used uint64 `json:"used"` UIVersion string `json:"uiVersion"` } // StorageInfo - web call to gather storage usage statistics. func (web *webAPIHandlers) StorageInfo(r *http.Request, args *WebGenericArgs, reply *StorageInfoRep) error { ctx := newWebContext(r, args, "WebStorageInfo") objectAPI := web.ObjectAPI() if objectAPI == nil { return toJSONError(ctx, errServerNotInitialized) } _, _, authErr := webRequestAuthenticate(r) if authErr != nil { return toJSONError(ctx, authErr) } dataUsageInfo, _ := loadDataUsageFromBackend(ctx, objectAPI) reply.Used = dataUsageInfo.ObjectsTotalSize reply.UIVersion = Version return nil } // MakeBucketArgs - make bucket args. type MakeBucketArgs struct { BucketName string `json:"bucketName"` } // MakeBucket - creates a new bucket. func (web *webAPIHandlers) MakeBucket(r *http.Request, args *MakeBucketArgs, reply *WebGenericRep) error { ctx := newWebContext(r, args, "WebMakeBucket") objectAPI := web.ObjectAPI() if objectAPI == nil { return toJSONError(ctx, errServerNotInitialized) } claims, owner, authErr := webRequestAuthenticate(r) if authErr != nil { return toJSONError(ctx, authErr) } // For authenticated users apply IAM policy. if !globalIAMSys.IsAllowed(iampolicy.Args{ AccountName: claims.AccessKey, Action: iampolicy.CreateBucketAction, BucketName: args.BucketName, ConditionValues: getConditionValues(r, "", claims.AccessKey, claims.Map()), IsOwner: owner, Claims: claims.Map(), }) { return toJSONError(ctx, errAccessDenied) } // Check if bucket is a reserved bucket name or invalid. if isReservedOrInvalidBucket(args.BucketName, true) { return toJSONError(ctx, errInvalidBucketName, args.BucketName) } opts := BucketOptions{ Location: globalServerRegion, LockEnabled: false, } if globalDNSConfig != nil { if _, err := globalDNSConfig.Get(args.BucketName); err != nil { if err == dns.ErrNoEntriesFound || err == dns.ErrNotImplemented { // Proceed to creating a bucket. if err = objectAPI.MakeBucketWithLocation(ctx, args.BucketName, opts); err != nil { return toJSONError(ctx, err) } if err = globalDNSConfig.Put(args.BucketName); err != nil { objectAPI.DeleteBucket(ctx, args.BucketName, false) return toJSONError(ctx, err) } reply.UIVersion = Version return nil } return toJSONError(ctx, err) } return toJSONError(ctx, errBucketAlreadyExists) } if err := objectAPI.MakeBucketWithLocation(ctx, args.BucketName, opts); err != nil { return toJSONError(ctx, err, args.BucketName) } reply.UIVersion = Version reqParams := extractReqParams(r) reqParams["accessKey"] = claims.AccessKey sendEvent(eventArgs{ EventName: event.BucketCreated, BucketName: args.BucketName, ReqParams: reqParams, UserAgent: r.UserAgent(), Host: handlers.GetSourceIP(r), }) return nil } // RemoveBucketArgs - remove bucket args. type RemoveBucketArgs struct { BucketName string `json:"bucketName"` } // DeleteBucket - removes a bucket, must be empty. 
func (web *webAPIHandlers) DeleteBucket(r *http.Request, args *RemoveBucketArgs, reply *WebGenericRep) error { ctx := newWebContext(r, args, "WebDeleteBucket") objectAPI := web.ObjectAPI() if objectAPI == nil { return toJSONError(ctx, errServerNotInitialized) } claims, owner, authErr := webRequestAuthenticate(r) if authErr != nil { return toJSONError(ctx, authErr) } // For authenticated users apply IAM policy. if !globalIAMSys.IsAllowed(iampolicy.Args{ AccountName: claims.AccessKey, Action: iampolicy.DeleteBucketAction, BucketName: args.BucketName, ConditionValues: getConditionValues(r, "", claims.AccessKey, claims.Map()), IsOwner: owner, Claims: claims.Map(), }) { return toJSONError(ctx, errAccessDenied) } // Check if bucket is a reserved bucket name or invalid. if isReservedOrInvalidBucket(args.BucketName, false) { return toJSONError(ctx, errInvalidBucketName, args.BucketName) } reply.UIVersion = Version if isRemoteCallRequired(ctx, args.BucketName, objectAPI) { sr, err := globalDNSConfig.Get(args.BucketName) if err != nil { if err == dns.ErrNoEntriesFound { return toJSONError(ctx, BucketNotFound{ Bucket: args.BucketName, }, args.BucketName) } return toJSONError(ctx, err, args.BucketName) } core, err := getRemoteInstanceClient(r, getHostFromSrv(sr)) if err != nil { return toJSONError(ctx, err, args.BucketName) } if err = core.RemoveBucket(ctx, args.BucketName); err != nil { return toJSONError(ctx, err, args.BucketName) } return nil } deleteBucket := objectAPI.DeleteBucket if err := deleteBucket(ctx, args.BucketName, false); err != nil { return toJSONError(ctx, err, args.BucketName) } globalNotificationSys.DeleteBucketMetadata(ctx, args.BucketName) if globalDNSConfig != nil { if err := globalDNSConfig.Delete(args.BucketName); err != nil { logger.LogIf(ctx, fmt.Errorf("Unable to delete bucket DNS entry %w, please delete it manually", err)) return toJSONError(ctx, err) } } reqParams := extractReqParams(r) reqParams["accessKey"] = claims.AccessKey sendEvent(eventArgs{ EventName: event.BucketRemoved, BucketName: args.BucketName, ReqParams: reqParams, UserAgent: r.UserAgent(), Host: handlers.GetSourceIP(r), }) return nil } // ListBucketsRep - list buckets response type ListBucketsRep struct { Buckets []WebBucketInfo `json:"buckets"` UIVersion string `json:"uiVersion"` } // WebBucketInfo container for list buckets metadata. type WebBucketInfo struct { // The name of the bucket. Name string `json:"name"` // Date the bucket was created. CreationDate time.Time `json:"creationDate"` } // ListBuckets - list buckets api. func (web *webAPIHandlers) ListBuckets(r *http.Request, args *WebGenericArgs, reply *ListBucketsRep) error { ctx := newWebContext(r, args, "WebListBuckets") objectAPI := web.ObjectAPI() if objectAPI == nil { return toJSONError(ctx, errServerNotInitialized) } listBuckets := objectAPI.ListBuckets claims, owner, authErr := webRequestAuthenticate(r) if authErr != nil { return toJSONError(ctx, authErr) } // Set prefix value for "s3:prefix" policy conditionals. r.Header.Set("prefix", "") // Set delimiter value for "s3:delimiter" policy conditionals. r.Header.Set("delimiter", SlashSeparator) // If etcd, dns federation configured list buckets from etcd. 
if globalDNSConfig != nil && globalBucketFederation { dnsBuckets, err := globalDNSConfig.List() if err != nil && !IsErrIgnored(err, dns.ErrNoEntriesFound, dns.ErrDomainMissing) { return toJSONError(ctx, err) } for _, dnsRecords := range dnsBuckets { if globalIAMSys.IsAllowed(iampolicy.Args{ AccountName: claims.AccessKey, Action: iampolicy.ListBucketAction, BucketName: dnsRecords[0].Key, ConditionValues: getConditionValues(r, "", claims.AccessKey, claims.Map()), IsOwner: owner, ObjectName: "", Claims: claims.Map(), }) { reply.Buckets = append(reply.Buckets, WebBucketInfo{ Name: dnsRecords[0].Key, CreationDate: dnsRecords[0].CreationDate, }) } } } else { buckets, err := listBuckets(ctx) if err != nil { return toJSONError(ctx, err) } for _, bucket := range buckets { if globalIAMSys.IsAllowed(iampolicy.Args{ AccountName: claims.AccessKey, Action: iampolicy.ListBucketAction, BucketName: bucket.Name, ConditionValues: getConditionValues(r, "", claims.AccessKey, claims.Map()), IsOwner: owner, ObjectName: "", Claims: claims.Map(), }) { reply.Buckets = append(reply.Buckets, WebBucketInfo{ Name: bucket.Name, CreationDate: bucket.Created, }) } } } reply.UIVersion = Version return nil } // ListObjectsArgs - list object args. type ListObjectsArgs struct { BucketName string `json:"bucketName"` Prefix string `json:"prefix"` Marker string `json:"marker"` } // ListObjectsRep - list objects response. type ListObjectsRep struct { Objects []WebObjectInfo `json:"objects"` Writable bool `json:"writable"` // Used by client to show "upload file" button. UIVersion string `json:"uiVersion"` } // WebObjectInfo container for list objects metadata. type WebObjectInfo struct { // Name of the object Key string `json:"name"` // Date and time the object was last modified. LastModified time.Time `json:"lastModified"` // Size in bytes of the object. Size int64 `json:"size"` // ContentType is mime type of the object. ContentType string `json:"contentType"` } // ListObjects - list objects api. 
func (web *webAPIHandlers) ListObjects(r *http.Request, args *ListObjectsArgs, reply *ListObjectsRep) error { ctx := newWebContext(r, args, "WebListObjects") reply.UIVersion = Version objectAPI := web.ObjectAPI() if objectAPI == nil { return toJSONError(ctx, errServerNotInitialized) } listObjects := objectAPI.ListObjects if isRemoteCallRequired(ctx, args.BucketName, objectAPI) { sr, err := globalDNSConfig.Get(args.BucketName) if err != nil { if err == dns.ErrNoEntriesFound { return toJSONError(ctx, BucketNotFound{ Bucket: args.BucketName, }, args.BucketName) } return toJSONError(ctx, err, args.BucketName) } core, err := getRemoteInstanceClient(r, getHostFromSrv(sr)) if err != nil { return toJSONError(ctx, err, args.BucketName) } nextMarker := "" // Fetch all the objects for { // Let listObjects reply back the maximum from server implementation result, err := core.ListObjects(args.BucketName, args.Prefix, nextMarker, SlashSeparator, 1000) if err != nil { return toJSONError(ctx, err, args.BucketName) } for _, obj := range result.Contents { reply.Objects = append(reply.Objects, WebObjectInfo{ Key: obj.Key, LastModified: obj.LastModified, Size: obj.Size, ContentType: obj.ContentType, }) } for _, p := range result.CommonPrefixes { reply.Objects = append(reply.Objects, WebObjectInfo{ Key: p.Prefix, }) } nextMarker = result.NextMarker // Return when there are no more objects if !result.IsTruncated { return nil } } } claims, owner, authErr := webRequestAuthenticate(r) if authErr != nil { if authErr == errNoAuthToken { // Set prefix value for "s3:prefix" policy conditionals. r.Header.Set("prefix", args.Prefix) // Set delimiter value for "s3:delimiter" policy conditionals. r.Header.Set("delimiter", SlashSeparator) // Check if anonymous (non-owner) has access to download objects. readable := globalPolicySys.IsAllowed(policy.Args{ Action: policy.ListBucketAction, BucketName: args.BucketName, ConditionValues: getConditionValues(r, "", "", nil), IsOwner: false, }) // Check if anonymous (non-owner) has access to upload objects. writable := globalPolicySys.IsAllowed(policy.Args{ Action: policy.PutObjectAction, BucketName: args.BucketName, ConditionValues: getConditionValues(r, "", "", nil), IsOwner: false, ObjectName: args.Prefix + SlashSeparator, }) reply.Writable = writable if !readable { // Error out if anonymous user (non-owner) has no access to download or upload objects if !writable { return errAccessDenied } // return empty object list if access is write only return nil } } else { return toJSONError(ctx, authErr) } } // For authenticated users apply IAM policy. if authErr == nil { // Set prefix value for "s3:prefix" policy conditionals. r.Header.Set("prefix", args.Prefix) // Set delimiter value for "s3:delimiter" policy conditionals. 
r.Header.Set("delimiter", SlashSeparator) readable := globalIAMSys.IsAllowed(iampolicy.Args{ AccountName: claims.AccessKey, Action: iampolicy.ListBucketAction, BucketName: args.BucketName, ConditionValues: getConditionValues(r, "", claims.AccessKey, claims.Map()), IsOwner: owner, Claims: claims.Map(), }) writable := globalIAMSys.IsAllowed(iampolicy.Args{ AccountName: claims.AccessKey, Action: iampolicy.PutObjectAction, BucketName: args.BucketName, ConditionValues: getConditionValues(r, "", claims.AccessKey, claims.Map()), IsOwner: owner, ObjectName: args.Prefix + SlashSeparator, Claims: claims.Map(), }) reply.Writable = writable if !readable { // Error out if anonymous user (non-owner) has no access to download or upload objects if !writable { return errAccessDenied } // return empty object list if access is write only return nil } } // Check if bucket is a reserved bucket name or invalid. if isReservedOrInvalidBucket(args.BucketName, false) { return toJSONError(ctx, errInvalidBucketName, args.BucketName) } nextMarker := "" // Fetch all the objects for { // Limit browser to '1000' batches to be more responsive, scrolling friendly. // Also don't change the maxKeys value silly GCS SDKs do not honor maxKeys // values to be '-1' lo, err := listObjects(ctx, args.BucketName, args.Prefix, nextMarker, SlashSeparator, 1000) if err != nil { return &json2.Error{Message: err.Error()} } nextMarker = lo.NextMarker for i := range lo.Objects { lo.Objects[i].Size, err = lo.Objects[i].GetActualSize() if err != nil { return toJSONError(ctx, err) } } for _, obj := range lo.Objects { reply.Objects = append(reply.Objects, WebObjectInfo{ Key: obj.Name, LastModified: obj.ModTime, Size: obj.Size, ContentType: obj.ContentType, }) } for _, prefix := range lo.Prefixes { reply.Objects = append(reply.Objects, WebObjectInfo{ Key: prefix, }) } // Return when there are no more objects if !lo.IsTruncated { return nil } } } // RemoveObjectArgs - args to remove an object, JSON will look like. // // { // "bucketname": "testbucket", // "objects": [ // "photos/hawaii/", // "photos/maldives/", // "photos/sanjose.jpg" // ] // } type RemoveObjectArgs struct { Objects []string `json:"objects"` // Contains objects, prefixes. BucketName string `json:"bucketname"` // Contains bucket name. } // RemoveObject - removes an object, or all the objects at a given prefix. func (web *webAPIHandlers) RemoveObject(r *http.Request, args *RemoveObjectArgs, reply *WebGenericRep) error { ctx := newWebContext(r, args, "WebRemoveObject") objectAPI := web.ObjectAPI() if objectAPI == nil { return toJSONError(ctx, errServerNotInitialized) } deleteObjects := objectAPI.DeleteObjects if web.CacheAPI() != nil { deleteObjects = web.CacheAPI().DeleteObjects } getObjectInfoFn := objectAPI.GetObjectInfo if web.CacheAPI() != nil { getObjectInfoFn = web.CacheAPI().GetObjectInfo } claims, owner, authErr := webRequestAuthenticate(r) if authErr != nil { if authErr == errNoAuthToken { // Check if all objects are allowed to be deleted anonymously for _, object := range args.Objects { if !globalPolicySys.IsAllowed(policy.Args{ Action: policy.DeleteObjectAction, BucketName: args.BucketName, ConditionValues: getConditionValues(r, "", "", nil), IsOwner: false, ObjectName: object, }) { return toJSONError(ctx, errAuthentication) } } } else { return toJSONError(ctx, authErr) } } if args.BucketName == "" || len(args.Objects) == 0 { return toJSONError(ctx, errInvalidArgument) } // Check if bucket is a reserved bucket name or invalid. 
if isReservedOrInvalidBucket(args.BucketName, false) { return toJSONError(ctx, errInvalidBucketName, args.BucketName) } reply.UIVersion = Version if isRemoteCallRequired(ctx, args.BucketName, objectAPI) { sr, err := globalDNSConfig.Get(args.BucketName) if err != nil { if err == dns.ErrNoEntriesFound { return toJSONError(ctx, BucketNotFound{ Bucket: args.BucketName, }, args.BucketName) } return toJSONError(ctx, err, args.BucketName) } core, err := getRemoteInstanceClient(r, getHostFromSrv(sr)) if err != nil { return toJSONError(ctx, err, args.BucketName) } objectsCh := make(chan miniogo.ObjectInfo) // Send object names that are needed to be removed to objectsCh go func() { defer close(objectsCh) for _, objectName := range args.Objects { objectsCh <- miniogo.ObjectInfo{ Key: objectName, } } }() for resp := range core.RemoveObjects(ctx, args.BucketName, objectsCh, minio.RemoveObjectsOptions{}) { if resp.Err != nil { return toJSONError(ctx, resp.Err, args.BucketName, resp.ObjectName) } } return nil } opts := ObjectOptions{ Versioned: globalBucketVersioningSys.Enabled(args.BucketName), VersionSuspended: globalBucketVersioningSys.Suspended(args.BucketName), } var ( err error replicateSync bool ) reqParams := extractReqParams(r) reqParams["accessKey"] = claims.AccessKey sourceIP := handlers.GetSourceIP(r) next: for _, objectName := range args.Objects { // If not a directory, remove the object. if !HasSuffix(objectName, SlashSeparator) && objectName != "" { // Check permissions for non-anonymous user. if authErr != errNoAuthToken { if !globalIAMSys.IsAllowed(iampolicy.Args{ AccountName: claims.AccessKey, Action: iampolicy.DeleteObjectAction, BucketName: args.BucketName, ConditionValues: getConditionValues(r, "", claims.AccessKey, claims.Map()), IsOwner: owner, ObjectName: objectName, Claims: claims.Map(), }) { return toJSONError(ctx, errAccessDenied) } } if authErr == errNoAuthToken { // Check if object is allowed to be deleted anonymously. if !globalPolicySys.IsAllowed(policy.Args{ Action: policy.DeleteObjectAction, BucketName: args.BucketName, ConditionValues: getConditionValues(r, "", "", nil), IsOwner: false, ObjectName: objectName, }) { return toJSONError(ctx, errAccessDenied) } } var ( replicateDel, hasLifecycleConfig bool goi ObjectInfo gerr error ) if _, err := globalBucketMetadataSys.GetLifecycleConfig(args.BucketName); err == nil { hasLifecycleConfig = true } if hasReplicationRules(ctx, args.BucketName, []ObjectToDelete{{ObjectName: objectName}}) || hasLifecycleConfig { goi, gerr = getObjectInfoFn(ctx, args.BucketName, objectName, opts) if replicateDel, replicateSync = checkReplicateDelete(ctx, args.BucketName, ObjectToDelete{ ObjectName: objectName, VersionID: goi.VersionID, }, goi, gerr); replicateDel { opts.DeleteMarkerReplicationStatus = string(replication.Pending) opts.DeleteMarker = true } } deleteObject := objectAPI.DeleteObject if web.CacheAPI() != nil { deleteObject = web.CacheAPI().DeleteObject } oi, err := deleteObject(ctx, args.BucketName, objectName, opts) if err != nil { switch err.(type) { case BucketNotFound: return toJSONError(ctx, err) } } if oi.Name == "" { logger.LogIf(ctx, err) continue } eventName := event.ObjectRemovedDelete if oi.DeleteMarker { eventName = event.ObjectRemovedDeleteMarkerCreated } // Notify object deleted event. 
sendEvent(eventArgs{ EventName: eventName, BucketName: args.BucketName, Object: oi, ReqParams: reqParams, UserAgent: r.UserAgent(), Host: sourceIP, }) if replicateDel { dobj := DeletedObjectVersionInfo{ DeletedObject: DeletedObject{ ObjectName: objectName, DeleteMarkerVersionID: oi.VersionID, DeleteMarkerReplicationStatus: string(oi.ReplicationStatus), DeleteMarkerMTime: DeleteMarkerMTime{oi.ModTime}, DeleteMarker: oi.DeleteMarker, VersionPurgeStatus: oi.VersionPurgeStatus, }, Bucket: args.BucketName, } scheduleReplicationDelete(ctx, dobj, objectAPI, replicateSync) } if goi.TransitionStatus == lifecycle.TransitionComplete { deleteTransitionedObject(ctx, objectAPI, args.BucketName, objectName, lifecycle.ObjectOpts{ Name: objectName, UserTags: goi.UserTags, VersionID: goi.VersionID, DeleteMarker: goi.DeleteMarker, TransitionStatus: goi.TransitionStatus, IsLatest: goi.IsLatest, }, false, true) } logger.LogIf(ctx, err) continue } if authErr == errNoAuthToken { // Check if object is allowed to be deleted anonymously if !globalPolicySys.IsAllowed(policy.Args{ Action: iampolicy.DeleteObjectAction, BucketName: args.BucketName, ConditionValues: getConditionValues(r, "", "", nil), IsOwner: false, ObjectName: objectName, }) { return toJSONError(ctx, errAccessDenied) } } else { if !globalIAMSys.IsAllowed(iampolicy.Args{ AccountName: claims.AccessKey, Action: iampolicy.DeleteObjectAction, BucketName: args.BucketName, ConditionValues: getConditionValues(r, "", claims.AccessKey, claims.Map()), IsOwner: owner, ObjectName: objectName, Claims: claims.Map(), }) { return toJSONError(ctx, errAccessDenied) } } // Allocate new results channel to receive ObjectInfo. objInfoCh := make(chan ObjectInfo) // Walk through all objects if err = objectAPI.Walk(ctx, args.BucketName, objectName, objInfoCh, ObjectOptions{}); err != nil { break next } for { var objects []ObjectToDelete for obj := range objInfoCh { if len(objects) == maxDeleteList { // Reached maximum delete requests, attempt a delete for now. break } if obj.ReplicationStatus == replication.Replica { if authErr == errNoAuthToken { // Check if object is allowed to be deleted anonymously if !globalPolicySys.IsAllowed(policy.Args{ Action: iampolicy.ReplicateDeleteAction, BucketName: args.BucketName, ConditionValues: getConditionValues(r, "", "", nil), IsOwner: false, ObjectName: objectName, }) { return toJSONError(ctx, errAccessDenied) } } else { if !globalIAMSys.IsAllowed(iampolicy.Args{ AccountName: claims.AccessKey, Action: iampolicy.ReplicateDeleteAction, BucketName: args.BucketName, ConditionValues: getConditionValues(r, "", claims.AccessKey, claims.Map()), IsOwner: owner, ObjectName: objectName, Claims: claims.Map(), }) { return toJSONError(ctx, errAccessDenied) } } } replicateDel, _ := checkReplicateDelete(ctx, args.BucketName, ObjectToDelete{ObjectName: obj.Name, VersionID: obj.VersionID}, obj, nil) // since versioned delete is not available on web browser, yet - this is a simple DeleteMarker replication objToDel := ObjectToDelete{ObjectName: obj.Name} if replicateDel { objToDel.DeleteMarkerReplicationStatus = string(replication.Pending) } objects = append(objects, objToDel) } // Nothing to do. if len(objects) == 0 { break next } // Deletes a list of objects. 
deletedObjects, errs := deleteObjects(ctx, args.BucketName, objects, opts) for i, err := range errs { if err != nil && !isErrObjectNotFound(err) { deletedObjects[i].DeleteMarkerReplicationStatus = objects[i].DeleteMarkerReplicationStatus deletedObjects[i].VersionPurgeStatus = objects[i].VersionPurgeStatus } if err != nil { logger.LogIf(ctx, err) break next } } // Notify deleted event for objects. for _, dobj := range deletedObjects { objInfo := ObjectInfo{ Name: dobj.ObjectName, VersionID: dobj.VersionID, } if dobj.DeleteMarker { objInfo = ObjectInfo{ Name: dobj.ObjectName, DeleteMarker: dobj.DeleteMarker, VersionID: dobj.DeleteMarkerVersionID, } } sendEvent(eventArgs{ EventName: event.ObjectRemovedDelete, BucketName: args.BucketName, Object: objInfo, ReqParams: reqParams, UserAgent: r.UserAgent(), Host: sourceIP, }) if dobj.DeleteMarkerReplicationStatus == string(replication.Pending) || dobj.VersionPurgeStatus == Pending { dv := DeletedObjectVersionInfo{ DeletedObject: dobj, Bucket: args.BucketName, } scheduleReplicationDelete(ctx, dv, objectAPI, replicateSync) } } } } if err != nil && !isErrObjectNotFound(err) && !isErrVersionNotFound(err) { // Ignore object not found error. return toJSONError(ctx, err, args.BucketName, "") } return nil } // LoginArgs - login arguments. type LoginArgs struct { Username string `json:"username" form:"username"` Password string `json:"password" form:"password"` } // LoginRep - login reply. type LoginRep struct { Token string `json:"token"` UIVersion string `json:"uiVersion"` } // Login - user login handler. func (web *webAPIHandlers) Login(r *http.Request, args *LoginArgs, reply *LoginRep) error { ctx := newWebContext(r, args, "WebLogin") token, err := authenticateWeb(args.Username, args.Password) if err != nil { return toJSONError(ctx, err) } reply.Token = token reply.UIVersion = Version return nil } // SetAuthArgs - argument for SetAuth type SetAuthArgs struct { CurrentAccessKey string `json:"currentAccessKey"` CurrentSecretKey string `json:"currentSecretKey"` NewAccessKey string `json:"newAccessKey"` NewSecretKey string `json:"newSecretKey"` } // SetAuthReply - reply for SetAuth type SetAuthReply struct { Token string `json:"token"` UIVersion string `json:"uiVersion"` PeerErrMsgs map[string]string `json:"peerErrMsgs"` } // SetAuth - Set accessKey and secretKey credentials. func (web *webAPIHandlers) SetAuth(r *http.Request, args *SetAuthArgs, reply *SetAuthReply) error { ctx := newWebContext(r, args, "WebSetAuth") claims, owner, authErr := webRequestAuthenticate(r) if authErr != nil { return toJSONError(ctx, authErr) } if owner { // Owner is not allowed to change credentials through browser. 
return toJSONError(ctx, errChangeCredNotAllowed) } if !globalIAMSys.IsAllowed(iampolicy.Args{ AccountName: claims.AccessKey, Action: iampolicy.CreateUserAdminAction, IsOwner: false, ConditionValues: getConditionValues(r, "", claims.AccessKey, claims.Map()), Claims: claims.Map(), DenyOnly: true, }) { return toJSONError(ctx, errChangeCredNotAllowed) } // for IAM users, access key cannot be updated // claims.AccessKey is used instead of accesskey from args prevCred, ok := globalIAMSys.GetUser(claims.AccessKey) if !ok { return errInvalidAccessKeyID } // Throw error when wrong secret key is provided if subtle.ConstantTimeCompare([]byte(prevCred.SecretKey), []byte(args.CurrentSecretKey)) != 1 { return errIncorrectCreds } creds, err := auth.CreateCredentials(claims.AccessKey, args.NewSecretKey) if err != nil { return toJSONError(ctx, err) } err = globalIAMSys.SetUserSecretKey(creds.AccessKey, creds.SecretKey) if err != nil { return toJSONError(ctx, err) } reply.Token, err = authenticateWeb(creds.AccessKey, creds.SecretKey) if err != nil { return toJSONError(ctx, err) } reply.UIVersion = Version return nil } // URLTokenReply contains the reply for CreateURLToken. type URLTokenReply struct { Token string `json:"token"` UIVersion string `json:"uiVersion"` } // CreateURLToken creates a URL token (short-lived) for GET requests. func (web *webAPIHandlers) CreateURLToken(r *http.Request, args *WebGenericArgs, reply *URLTokenReply) error { ctx := newWebContext(r, args, "WebCreateURLToken") claims, owner, authErr := webRequestAuthenticate(r) if authErr != nil { return toJSONError(ctx, authErr) } creds := globalActiveCred if !owner { var ok bool creds, ok = globalIAMSys.GetUser(claims.AccessKey) if !ok { return toJSONError(ctx, errInvalidAccessKeyID) } } if creds.SessionToken != "" { // Use the same session token for URL token. reply.Token = creds.SessionToken } else { token, err := authenticateURL(creds.AccessKey, creds.SecretKey) if err != nil { return toJSONError(ctx, err) } reply.Token = token } reply.UIVersion = Version return nil } // Upload - file upload handler. func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) { ctx := newContext(r, w, "WebUpload") // obtain the claims here if possible, for audit logging. claims, owner, authErr := webRequestAuthenticate(r) defer logger.AuditLog(ctx, w, r, claims.Map()) objectAPI := web.ObjectAPI() if objectAPI == nil { writeWebErrorResponse(w, errServerNotInitialized) return } vars := mux.Vars(r) bucket := vars["bucket"] object, err := unescapePath(vars["object"]) if err != nil { writeWebErrorResponse(w, err) return } retPerms := ErrAccessDenied holdPerms := ErrAccessDenied replPerms := ErrAccessDenied if authErr != nil { if authErr == errNoAuthToken { // Check if anonymous (non-owner) has access to upload objects. if !globalPolicySys.IsAllowed(policy.Args{ Action: policy.PutObjectAction, BucketName: bucket, ConditionValues: getConditionValues(r, "", "", nil), IsOwner: false, ObjectName: object, }) { writeWebErrorResponse(w, errAuthentication) return } } else { writeWebErrorResponse(w, authErr) return } } // For authenticated users apply IAM policy. 
if authErr == nil { if !globalIAMSys.IsAllowed(iampolicy.Args{ AccountName: claims.AccessKey, Action: iampolicy.PutObjectAction, BucketName: bucket, ConditionValues: getConditionValues(r, "", claims.AccessKey, claims.Map()), IsOwner: owner, ObjectName: object, Claims: claims.Map(), }) { writeWebErrorResponse(w, errAuthentication) return } if globalIAMSys.IsAllowed(iampolicy.Args{ AccountName: claims.AccessKey, Action: iampolicy.PutObjectRetentionAction, BucketName: bucket, ConditionValues: getConditionValues(r, "", claims.AccessKey, claims.Map()), IsOwner: owner, ObjectName: object, Claims: claims.Map(), }) { retPerms = ErrNone } if globalIAMSys.IsAllowed(iampolicy.Args{ AccountName: claims.AccessKey, Action: iampolicy.PutObjectLegalHoldAction, BucketName: bucket, ConditionValues: getConditionValues(r, "", claims.AccessKey, claims.Map()), IsOwner: owner, ObjectName: object, Claims: claims.Map(), }) { holdPerms = ErrNone } if globalIAMSys.IsAllowed(iampolicy.Args{ AccountName: claims.AccessKey, Action: iampolicy.GetReplicationConfigurationAction, BucketName: bucket, ConditionValues: getConditionValues(r, "", claims.AccessKey, claims.Map()), IsOwner: owner, ObjectName: "", Claims: claims.Map(), }) { replPerms = ErrNone } } // Check if bucket is a reserved bucket name or invalid. if isReservedOrInvalidBucket(bucket, false) { writeWebErrorResponse(w, errInvalidBucketName) return } // Check if bucket encryption is enabled _, err = globalBucketSSEConfigSys.Get(bucket) if (globalAutoEncryption || err == nil) && !crypto.SSEC.IsRequested(r.Header) { r.Header.Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionAES) } // Require Content-Length to be set in the request size := r.ContentLength if size < 0 { writeWebErrorResponse(w, errSizeUnspecified) return } if err := enforceBucketQuota(ctx, bucket, size); err != nil { writeWebErrorResponse(w, err) return } // Extract incoming metadata if any. metadata, err := extractMetadata(ctx, r) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) return } var pReader *PutObjReader var reader io.Reader = r.Body actualSize := size hashReader, err := hash.NewReader(reader, size, "", "", actualSize) if err != nil { writeWebErrorResponse(w, err) return } if objectAPI.IsCompressionSupported() && isCompressible(r.Header, object) && size > 0 { // Storing the compression metadata. metadata[ReservedMetadataPrefix+"compression"] = compressionAlgorithmV2 metadata[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(actualSize, 10) actualReader, err := hash.NewReader(reader, actualSize, "", "", actualSize) if err != nil { writeWebErrorResponse(w, err) return } // Set compression metrics. size = -1 // Since compressed size is un-predictable. 
s2c := newS2CompressReader(actualReader, actualSize) defer s2c.Close() reader = etag.Wrap(s2c, actualReader) hashReader, err = hash.NewReader(reader, size, "", "", actualSize) if err != nil { writeWebErrorResponse(w, err) return } } mustReplicate, sync := mustReplicateWeb(ctx, r, bucket, object, metadata, "", replPerms) if mustReplicate { metadata[xhttp.AmzBucketReplicationStatus] = string(replication.Pending) } pReader = NewPutObjReader(hashReader) // get gateway encryption options opts, err := putOpts(ctx, r, bucket, object, metadata) if err != nil { writeErrorResponseHeadersOnly(w, toAPIError(ctx, err)) return } if objectAPI.IsEncryptionSupported() { if _, ok := crypto.IsRequested(r.Header); ok && !HasSuffix(object, SlashSeparator) { // handle SSE requests var ( objectEncryptionKey crypto.ObjectKey encReader io.Reader ) encReader, objectEncryptionKey, err = EncryptRequest(hashReader, r, bucket, object, metadata) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) return } info := ObjectInfo{Size: size} // do not try to verify encrypted content hashReader, err = hash.NewReader(etag.Wrap(encReader, hashReader), info.EncryptedSize(), "", "", size) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) return } pReader, err = pReader.WithEncryption(hashReader, &objectEncryptionKey) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) return } } } // Ensure that metadata does not contain sensitive information crypto.RemoveSensitiveEntries(metadata) putObject := objectAPI.PutObject getObjectInfo := objectAPI.GetObjectInfo if web.CacheAPI() != nil { putObject = web.CacheAPI().PutObject getObjectInfo = web.CacheAPI().GetObjectInfo } // enforce object retention rules retentionMode, retentionDate, _, s3Err := checkPutObjectLockAllowed(ctx, r, bucket, object, getObjectInfo, retPerms, holdPerms) if s3Err != ErrNone { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL, guessIsBrowserReq(r)) return } if retentionMode != "" { opts.UserDefined[strings.ToLower(xhttp.AmzObjectLockMode)] = string(retentionMode) opts.UserDefined[strings.ToLower(xhttp.AmzObjectLockRetainUntilDate)] = retentionDate.UTC().Format(iso8601TimeFormat) } objInfo, err := putObject(GlobalContext, bucket, object, pReader, opts) if err != nil { writeWebErrorResponse(w, err) return } if objectAPI.IsEncryptionSupported() { switch kind, _ := crypto.IsEncrypted(objInfo.UserDefined); kind { case crypto.S3: w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionAES) case crypto.SSEC: w.Header().Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerAlgorithm)) w.Header().Set(xhttp.AmzServerSideEncryptionCustomerKeyMD5, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerKeyMD5)) } } if mustReplicate { scheduleReplication(ctx, objInfo.Clone(), objectAPI, sync) } reqParams := extractReqParams(r) reqParams["accessKey"] = claims.AccessKey // Notify object created event. sendEvent(eventArgs{ EventName: event.ObjectCreatedPut, BucketName: bucket, Object: objInfo, ReqParams: reqParams, RespElements: extractRespElements(w), UserAgent: r.UserAgent(), Host: handlers.GetSourceIP(r), }) } // Download - file download handler. 
func (web *webAPIHandlers) Download(w http.ResponseWriter, r *http.Request) { ctx := newContext(r, w, "WebDownload") claims, owner, authErr := webTokenAuthenticate(r.URL.Query().Get("token")) defer logger.AuditLog(ctx, w, r, claims.Map()) objectAPI := web.ObjectAPI() if objectAPI == nil { writeWebErrorResponse(w, errServerNotInitialized) return } vars := mux.Vars(r) bucket := vars["bucket"] object, err := unescapePath(vars["object"]) if err != nil { writeWebErrorResponse(w, err) return } getRetPerms := ErrAccessDenied legalHoldPerms := ErrAccessDenied if authErr != nil { if authErr == errNoAuthToken { // Check if anonymous (non-owner) has access to download objects. if !globalPolicySys.IsAllowed(policy.Args{ Action: policy.GetObjectAction, BucketName: bucket, ConditionValues: getConditionValues(r, "", "", nil), IsOwner: false, ObjectName: object, }) { writeWebErrorResponse(w, errAuthentication) return } if globalPolicySys.IsAllowed(policy.Args{ Action: policy.GetObjectRetentionAction, BucketName: bucket, ConditionValues: getConditionValues(r, "", "", nil), IsOwner: false, ObjectName: object, }) { getRetPerms = ErrNone } if globalPolicySys.IsAllowed(policy.Args{ Action: policy.GetObjectLegalHoldAction, BucketName: bucket, ConditionValues: getConditionValues(r, "", "", nil), IsOwner: false, ObjectName: object, }) { legalHoldPerms = ErrNone } } else { writeWebErrorResponse(w, authErr) return } } // For authenticated users apply IAM policy. if authErr == nil { if !globalIAMSys.IsAllowed(iampolicy.Args{ AccountName: claims.AccessKey, Action: iampolicy.GetObjectAction, BucketName: bucket, ConditionValues: getConditionValues(r, "", claims.AccessKey, claims.Map()), IsOwner: owner, ObjectName: object, Claims: claims.Map(), }) { writeWebErrorResponse(w, errAuthentication) return } if globalIAMSys.IsAllowed(iampolicy.Args{ AccountName: claims.AccessKey, Action: iampolicy.GetObjectRetentionAction, BucketName: bucket, ConditionValues: getConditionValues(r, "", claims.AccessKey, claims.Map()), IsOwner: owner, ObjectName: object, Claims: claims.Map(), }) { getRetPerms = ErrNone } if globalIAMSys.IsAllowed(iampolicy.Args{ AccountName: claims.AccessKey, Action: iampolicy.GetObjectLegalHoldAction, BucketName: bucket, ConditionValues: getConditionValues(r, "", claims.AccessKey, claims.Map()), IsOwner: owner, ObjectName: object, Claims: claims.Map(), }) { legalHoldPerms = ErrNone } } // Check if bucket is a reserved bucket name or invalid. 
if isReservedOrInvalidBucket(bucket, false) { writeWebErrorResponse(w, errInvalidBucketName) return } getObjectNInfo := objectAPI.GetObjectNInfo if web.CacheAPI() != nil { getObjectNInfo = web.CacheAPI().GetObjectNInfo } var opts ObjectOptions gr, err := getObjectNInfo(ctx, bucket, object, nil, r.Header, readLock, opts) if err != nil { writeWebErrorResponse(w, err) return } defer gr.Close() objInfo := gr.ObjInfo // filter object lock metadata if permission does not permit objInfo.UserDefined = objectlock.FilterObjectLockMetadata(objInfo.UserDefined, getRetPerms != ErrNone, legalHoldPerms != ErrNone) if objectAPI.IsEncryptionSupported() { if _, err = DecryptObjectInfo(&objInfo, r); err != nil { writeWebErrorResponse(w, err) return } } // Set encryption response headers if objectAPI.IsEncryptionSupported() { switch kind, _ := crypto.IsEncrypted(objInfo.UserDefined); kind { case crypto.S3: w.Header().Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionAES) case crypto.SSEC: w.Header().Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerAlgorithm)) w.Header().Set(xhttp.AmzServerSideEncryptionCustomerKeyMD5, r.Header.Get(xhttp.AmzServerSideEncryptionCustomerKeyMD5)) } } // Set Parts Count Header if opts.PartNumber > 0 && len(objInfo.Parts) > 0 { setPartsCountHeaders(w, objInfo) } if err = setObjectHeaders(w, objInfo, nil, opts); err != nil { writeWebErrorResponse(w, err) return } // Add content disposition. w.Header().Set(xhttp.ContentDisposition, fmt.Sprintf("attachment; filename=\"%s\"", path.Base(objInfo.Name))) setHeadGetRespHeaders(w, r.URL.Query()) httpWriter := ioutil.WriteOnClose(w) // Write object content to response body if _, err = io.Copy(httpWriter, gr); err != nil { if !httpWriter.HasWritten() { // write error response only if no data or headers has been written to client yet writeWebErrorResponse(w, err) } return } if err = httpWriter.Close(); err != nil { if !httpWriter.HasWritten() { // write error response only if no data or headers has been written to client yet writeWebErrorResponse(w, err) return } } reqParams := extractReqParams(r) reqParams["accessKey"] = claims.AccessKey // Notify object accessed via a GET request. sendEvent(eventArgs{ EventName: event.ObjectAccessedGet, BucketName: bucket, Object: objInfo, ReqParams: reqParams, RespElements: extractRespElements(w), UserAgent: r.UserAgent(), Host: handlers.GetSourceIP(r), }) } // DownloadZipArgs - Argument for downloading a bunch of files as a zip file. // JSON will look like: // '{"bucketname":"testbucket","prefix":"john/pics/","objects":["hawaii/","maldives/","sanjose.jpg"]}' type DownloadZipArgs struct { Objects []string `json:"objects"` // can be files or sub-directories Prefix string `json:"prefix"` // current directory in the browser-ui BucketName string `json:"bucketname"` // bucket name. } // Takes a list of objects and creates a zip file that sent as the response body. func (web *webAPIHandlers) DownloadZip(w http.ResponseWriter, r *http.Request) { host := handlers.GetSourceIP(r) claims, owner, authErr := webTokenAuthenticate(r.URL.Query().Get("token")) ctx := newContext(r, w, "WebDownloadZip") defer logger.AuditLog(ctx, w, r, claims.Map()) objectAPI := web.ObjectAPI() if objectAPI == nil { writeWebErrorResponse(w, errServerNotInitialized) return } // Auth is done after reading the body to accommodate for anonymous requests // when bucket policy is enabled. 
var args DownloadZipArgs tenKB := 10 * 1024 // To limit r.Body to take care of misbehaving anonymous client. decodeErr := json.NewDecoder(io.LimitReader(r.Body, int64(tenKB))).Decode(&args) if decodeErr != nil { writeWebErrorResponse(w, decodeErr) return } var getRetPerms []APIErrorCode var legalHoldPerms []APIErrorCode if authErr != nil { if authErr == errNoAuthToken { for _, object := range args.Objects { // Check if anonymous (non-owner) has access to download objects. if !globalPolicySys.IsAllowed(policy.Args{ Action: policy.GetObjectAction, BucketName: args.BucketName, ConditionValues: getConditionValues(r, "", "", nil), IsOwner: false, ObjectName: pathJoin(args.Prefix, object), }) { writeWebErrorResponse(w, errAuthentication) return } retentionPerm := ErrAccessDenied if globalPolicySys.IsAllowed(policy.Args{ Action: policy.GetObjectRetentionAction, BucketName: args.BucketName, ConditionValues: getConditionValues(r, "", "", nil), IsOwner: false, ObjectName: pathJoin(args.Prefix, object), }) { retentionPerm = ErrNone } getRetPerms = append(getRetPerms, retentionPerm) legalHoldPerm := ErrAccessDenied if globalPolicySys.IsAllowed(policy.Args{ Action: policy.GetObjectLegalHoldAction, BucketName: args.BucketName, ConditionValues: getConditionValues(r, "", "", nil), IsOwner: false, ObjectName: pathJoin(args.Prefix, object), }) { legalHoldPerm = ErrNone } legalHoldPerms = append(legalHoldPerms, legalHoldPerm) } } else { writeWebErrorResponse(w, authErr) return } } // For authenticated users apply IAM policy. if authErr == nil { for _, object := range args.Objects { if !globalIAMSys.IsAllowed(iampolicy.Args{ AccountName: claims.AccessKey, Action: iampolicy.GetObjectAction, BucketName: args.BucketName, ConditionValues: getConditionValues(r, "", claims.AccessKey, claims.Map()), IsOwner: owner, ObjectName: pathJoin(args.Prefix, object), Claims: claims.Map(), }) { writeWebErrorResponse(w, errAuthentication) return } retentionPerm := ErrAccessDenied if globalIAMSys.IsAllowed(iampolicy.Args{ AccountName: claims.AccessKey, Action: iampolicy.GetObjectRetentionAction, BucketName: args.BucketName, ConditionValues: getConditionValues(r, "", claims.AccessKey, claims.Map()), IsOwner: owner, ObjectName: pathJoin(args.Prefix, object), Claims: claims.Map(), }) { retentionPerm = ErrNone } getRetPerms = append(getRetPerms, retentionPerm) legalHoldPerm := ErrAccessDenied if globalIAMSys.IsAllowed(iampolicy.Args{ AccountName: claims.AccessKey, Action: iampolicy.GetObjectLegalHoldAction, BucketName: args.BucketName, ConditionValues: getConditionValues(r, "", claims.AccessKey, claims.Map()), IsOwner: owner, ObjectName: pathJoin(args.Prefix, object), Claims: claims.Map(), }) { legalHoldPerm = ErrNone } legalHoldPerms = append(legalHoldPerms, legalHoldPerm) } } // Check if bucket is a reserved bucket name or invalid. if isReservedOrInvalidBucket(args.BucketName, false) { writeWebErrorResponse(w, errInvalidBucketName) return } getObjectNInfo := objectAPI.GetObjectNInfo if web.CacheAPI() != nil { getObjectNInfo = web.CacheAPI().GetObjectNInfo } archive := zip.NewWriter(w) defer archive.Close() reqParams := extractReqParams(r) reqParams["accessKey"] = claims.AccessKey respElements := extractRespElements(w) for i, object := range args.Objects { // Writes compressed object file to the response. 
zipit := func(objectName string) error { var opts ObjectOptions gr, err := getObjectNInfo(ctx, args.BucketName, objectName, nil, r.Header, readLock, opts) if err != nil { return err } defer gr.Close() info := gr.ObjInfo // filter object lock metadata if permission does not permit info.UserDefined = objectlock.FilterObjectLockMetadata(info.UserDefined, getRetPerms[i] != ErrNone, legalHoldPerms[i] != ErrNone) // For reporting, set the file size to the uncompressed size. info.Size, err = info.GetActualSize() if err != nil { return err } header := &zip.FileHeader{ Name: strings.TrimPrefix(objectName, args.Prefix), Method: zip.Deflate, Flags: 1 << 11, Modified: info.ModTime, } if hasStringSuffixInSlice(info.Name, standardExcludeCompressExtensions) || hasPattern(standardExcludeCompressContentTypes, info.ContentType) { // We strictly disable compression for standard extensions/content-types. header.Method = zip.Store } writer, err := archive.CreateHeader(header) if err != nil { writeWebErrorResponse(w, errUnexpected) return err } httpWriter := ioutil.WriteOnClose(writer) // Write object content to response body if _, err = io.Copy(httpWriter, gr); err != nil { httpWriter.Close() if !httpWriter.HasWritten() { // write error response only if no data or headers has been written to client yet writeWebErrorResponse(w, err) } return err } if err = httpWriter.Close(); err != nil { if !httpWriter.HasWritten() { // write error response only if no data has been written to client yet writeWebErrorResponse(w, err) return err } } // Notify object accessed via a GET request. sendEvent(eventArgs{ EventName: event.ObjectAccessedGet, BucketName: args.BucketName, Object: info, ReqParams: reqParams, RespElements: respElements, UserAgent: r.UserAgent(), Host: host, }) return nil } if !HasSuffix(object, SlashSeparator) { // If not a directory, compress the file and write it to response. err := zipit(pathJoin(args.Prefix, object)) if err != nil { logger.LogIf(ctx, err) return } continue } objInfoCh := make(chan ObjectInfo) // Walk through all objects if err := objectAPI.Walk(ctx, args.BucketName, pathJoin(args.Prefix, object), objInfoCh, ObjectOptions{}); err != nil { logger.LogIf(ctx, err) continue } for obj := range objInfoCh { if err := zipit(obj.Name); err != nil { logger.LogIf(ctx, err) continue } } } } // GetBucketPolicyArgs - get bucket policy args. type GetBucketPolicyArgs struct { BucketName string `json:"bucketName"` Prefix string `json:"prefix"` } // GetBucketPolicyRep - get bucket policy reply. type GetBucketPolicyRep struct { UIVersion string `json:"uiVersion"` Policy miniogopolicy.BucketPolicy `json:"policy"` } // GetBucketPolicy - get bucket policy for the requested prefix. func (web *webAPIHandlers) GetBucketPolicy(r *http.Request, args *GetBucketPolicyArgs, reply *GetBucketPolicyRep) error { ctx := newWebContext(r, args, "WebGetBucketPolicy") objectAPI := web.ObjectAPI() if objectAPI == nil { return toJSONError(ctx, errServerNotInitialized) } claims, owner, authErr := webRequestAuthenticate(r) if authErr != nil { return toJSONError(ctx, authErr) } // For authenticated users apply IAM policy. if !globalIAMSys.IsAllowed(iampolicy.Args{ AccountName: claims.AccessKey, Action: iampolicy.GetBucketPolicyAction, BucketName: args.BucketName, ConditionValues: getConditionValues(r, "", claims.AccessKey, claims.Map()), IsOwner: owner, Claims: claims.Map(), }) { return toJSONError(ctx, errAccessDenied) } // Check if bucket is a reserved bucket name or invalid. 
if isReservedOrInvalidBucket(args.BucketName, false) {
		return toJSONError(ctx, errInvalidBucketName, args.BucketName)
	}

	var policyInfo = &miniogopolicy.BucketAccessPolicy{Version: "2012-10-17"}
	if isRemoteCallRequired(ctx, args.BucketName, objectAPI) {
		sr, err := globalDNSConfig.Get(args.BucketName)
		if err != nil {
			if err == dns.ErrNoEntriesFound {
				return toJSONError(ctx, BucketNotFound{
					Bucket: args.BucketName,
				}, args.BucketName)
			}
			return toJSONError(ctx, err, args.BucketName)
		}
		client, rerr := getRemoteInstanceClient(r, getHostFromSrv(sr))
		if rerr != nil {
			return toJSONError(ctx, rerr, args.BucketName)
		}
		policyStr, err := client.GetBucketPolicy(ctx, args.BucketName)
		if err != nil {
			// return the GetBucketPolicy error, not the (nil) remote-client error
			return toJSONError(ctx, err, args.BucketName)
		}
		bucketPolicy, err := policy.ParseConfig(strings.NewReader(policyStr), args.BucketName)
		if err != nil {
			// return the parse error, not the (nil) remote-client error
			return toJSONError(ctx, err, args.BucketName)
		}
		policyInfo, err = PolicyToBucketAccessPolicy(bucketPolicy)
		if err != nil {
			// This should not happen.
			return toJSONError(ctx, err, args.BucketName)
		}
	} else {
		bucketPolicy, err := globalPolicySys.Get(args.BucketName)
		if err != nil {
			if _, ok := err.(BucketPolicyNotFound); !ok {
				return toJSONError(ctx, err, args.BucketName)
			}
		}
		policyInfo, err = PolicyToBucketAccessPolicy(bucketPolicy)
		if err != nil {
			// This should not happen.
			return toJSONError(ctx, err, args.BucketName)
		}
	}

	reply.UIVersion = Version
	reply.Policy = miniogopolicy.GetPolicy(policyInfo.Statements, args.BucketName, args.Prefix)

	return nil
}

// ListAllBucketPoliciesArgs - get all bucket policies.
type ListAllBucketPoliciesArgs struct {
	BucketName string `json:"bucketName"`
}

// BucketAccessPolicy - Collection of canned bucket policy at a given prefix.
type BucketAccessPolicy struct {
	Bucket string                     `json:"bucket"`
	Prefix string                     `json:"prefix"`
	Policy miniogopolicy.BucketPolicy `json:"policy"`
}

// ListAllBucketPoliciesRep - get all bucket policies reply.
type ListAllBucketPoliciesRep struct {
	UIVersion string               `json:"uiVersion"`
	Policies  []BucketAccessPolicy `json:"policies"`
}

// ListAllBucketPolicies - get all bucket policies.
func (web *webAPIHandlers) ListAllBucketPolicies(r *http.Request, args *ListAllBucketPoliciesArgs, reply *ListAllBucketPoliciesRep) error {
	ctx := newWebContext(r, args, "WebListAllBucketPolicies")
	objectAPI := web.ObjectAPI()
	if objectAPI == nil {
		return toJSONError(ctx, errServerNotInitialized)
	}
	claims, owner, authErr := webRequestAuthenticate(r)
	if authErr != nil {
		return toJSONError(ctx, authErr)
	}
	// For authenticated users apply IAM policy.
	if !globalIAMSys.IsAllowed(iampolicy.Args{
		AccountName:     claims.AccessKey,
		Action:          iampolicy.GetBucketPolicyAction,
		BucketName:      args.BucketName,
		ConditionValues: getConditionValues(r, "", claims.AccessKey, claims.Map()),
		IsOwner:         owner,
		Claims:          claims.Map(),
	}) {
		return toJSONError(ctx, errAccessDenied)
	}
	// Check if bucket is a reserved bucket name or invalid.
if isReservedOrInvalidBucket(args.BucketName, false) { return toJSONError(ctx, errInvalidBucketName, args.BucketName) } var policyInfo = new(miniogopolicy.BucketAccessPolicy) if isRemoteCallRequired(ctx, args.BucketName, objectAPI) { sr, err := globalDNSConfig.Get(args.BucketName) if err != nil { if err == dns.ErrNoEntriesFound { return toJSONError(ctx, BucketNotFound{ Bucket: args.BucketName, }, args.BucketName) } return toJSONError(ctx, err, args.BucketName) } core, rerr := getRemoteInstanceClient(r, getHostFromSrv(sr)) if rerr != nil { return toJSONError(ctx, rerr, args.BucketName) } var policyStr string policyStr, err = core.Client.GetBucketPolicy(ctx, args.BucketName) if err != nil { return toJSONError(ctx, err, args.BucketName) } if policyStr != "" { if err = json.Unmarshal([]byte(policyStr), policyInfo); err != nil { return toJSONError(ctx, err, args.BucketName) } } } else { bucketPolicy, err := globalPolicySys.Get(args.BucketName) if err != nil { if _, ok := err.(BucketPolicyNotFound); !ok { return toJSONError(ctx, err, args.BucketName) } } policyInfo, err = PolicyToBucketAccessPolicy(bucketPolicy) if err != nil { return toJSONError(ctx, err, args.BucketName) } } reply.UIVersion = Version for prefix, policy := range miniogopolicy.GetPolicies(policyInfo.Statements, args.BucketName, "") { bucketName, objectPrefix := path2BucketObject(prefix) objectPrefix = strings.TrimSuffix(objectPrefix, "*") reply.Policies = append(reply.Policies, BucketAccessPolicy{ Bucket: bucketName, Prefix: objectPrefix, Policy: policy, }) } return nil } // SetBucketPolicyWebArgs - set bucket policy args. type SetBucketPolicyWebArgs struct { BucketName string `json:"bucketName"` Prefix string `json:"prefix"` Policy string `json:"policy"` } // SetBucketPolicy - set bucket policy. func (web *webAPIHandlers) SetBucketPolicy(r *http.Request, args *SetBucketPolicyWebArgs, reply *WebGenericRep) error { ctx := newWebContext(r, args, "WebSetBucketPolicy") objectAPI := web.ObjectAPI() reply.UIVersion = Version if objectAPI == nil { return toJSONError(ctx, errServerNotInitialized) } claims, owner, authErr := webRequestAuthenticate(r) if authErr != nil { return toJSONError(ctx, authErr) } // For authenticated users apply IAM policy. if !globalIAMSys.IsAllowed(iampolicy.Args{ AccountName: claims.AccessKey, Action: iampolicy.PutBucketPolicyAction, BucketName: args.BucketName, ConditionValues: getConditionValues(r, "", claims.AccessKey, claims.Map()), IsOwner: owner, Claims: claims.Map(), }) { return toJSONError(ctx, errAccessDenied) } // Check if bucket is a reserved bucket name or invalid. if isReservedOrInvalidBucket(args.BucketName, false) { return toJSONError(ctx, errInvalidBucketName, args.BucketName) } policyType := miniogopolicy.BucketPolicy(args.Policy) if !policyType.IsValidBucketPolicy() { return &json2.Error{ Message: "Invalid policy type " + args.Policy, } } if isRemoteCallRequired(ctx, args.BucketName, objectAPI) { sr, err := globalDNSConfig.Get(args.BucketName) if err != nil { if err == dns.ErrNoEntriesFound { return toJSONError(ctx, BucketNotFound{ Bucket: args.BucketName, }, args.BucketName) } return toJSONError(ctx, err, args.BucketName) } core, rerr := getRemoteInstanceClient(r, getHostFromSrv(sr)) if rerr != nil { return toJSONError(ctx, rerr, args.BucketName) } var policyStr string // Use the abstracted API instead of core, such that // NoSuchBucketPolicy errors are automatically handled. 
policyStr, err = core.Client.GetBucketPolicy(ctx, args.BucketName) if err != nil { return toJSONError(ctx, err, args.BucketName) } var policyInfo = &miniogopolicy.BucketAccessPolicy{Version: "2012-10-17"} if policyStr != "" { if err = json.Unmarshal([]byte(policyStr), policyInfo); err != nil { return toJSONError(ctx, err, args.BucketName) } } policyInfo.Statements = miniogopolicy.SetPolicy(policyInfo.Statements, policyType, args.BucketName, args.Prefix) if len(policyInfo.Statements) == 0 { if err = core.SetBucketPolicy(ctx, args.BucketName, ""); err != nil { return toJSONError(ctx, err, args.BucketName) } return nil } bucketPolicy, err := BucketAccessPolicyToPolicy(policyInfo) if err != nil { // This should not happen. return toJSONError(ctx, err, args.BucketName) } policyData, err := json.Marshal(bucketPolicy) if err != nil { return toJSONError(ctx, err, args.BucketName) } if err = core.SetBucketPolicy(ctx, args.BucketName, string(policyData)); err != nil { return toJSONError(ctx, err, args.BucketName) } } else { bucketPolicy, err := globalPolicySys.Get(args.BucketName) if err != nil { if _, ok := err.(BucketPolicyNotFound); !ok { return toJSONError(ctx, err, args.BucketName) } } policyInfo, err := PolicyToBucketAccessPolicy(bucketPolicy) if err != nil { // This should not happen. return toJSONError(ctx, err, args.BucketName) } policyInfo.Statements = miniogopolicy.SetPolicy(policyInfo.Statements, policyType, args.BucketName, args.Prefix) if len(policyInfo.Statements) == 0 { if err = globalBucketMetadataSys.Update(args.BucketName, bucketPolicyConfig, nil); err != nil { return toJSONError(ctx, err, args.BucketName) } return nil } bucketPolicy, err = BucketAccessPolicyToPolicy(policyInfo) if err != nil { // This should not happen. return toJSONError(ctx, err, args.BucketName) } configData, err := json.Marshal(bucketPolicy) if err != nil { return toJSONError(ctx, err, args.BucketName) } // Parse validate and save bucket policy. if err = globalBucketMetadataSys.Update(args.BucketName, bucketPolicyConfig, configData); err != nil { return toJSONError(ctx, err, args.BucketName) } } return nil } // PresignedGetArgs - presigned-get API args. type PresignedGetArgs struct { // Host header required for signed headers. HostName string `json:"host"` // Bucket name of the object to be presigned. BucketName string `json:"bucket"` // Object name to be presigned. ObjectName string `json:"object"` // Expiry in seconds. Expiry int64 `json:"expiry"` } // PresignedGetRep - presigned-get URL reply. type PresignedGetRep struct { UIVersion string `json:"uiVersion"` // Presigned URL of the object. URL string `json:"url"` } // PresignedGET - returns presigned-Get url. func (web *webAPIHandlers) PresignedGet(r *http.Request, args *PresignedGetArgs, reply *PresignedGetRep) error { ctx := newWebContext(r, args, "WebPresignedGet") claims, owner, authErr := webRequestAuthenticate(r) if authErr != nil { return toJSONError(ctx, authErr) } var creds auth.Credentials if !owner { var ok bool creds, ok = globalIAMSys.GetUser(claims.AccessKey) if !ok { return toJSONError(ctx, errInvalidAccessKeyID) } } else { creds = globalActiveCred } region := globalServerRegion if args.BucketName == "" || args.ObjectName == "" { return &json2.Error{ Message: "Bucket and Object are mandatory arguments.", } } // Check if bucket is a reserved bucket name or invalid. 
if isReservedOrInvalidBucket(args.BucketName, false) { return toJSONError(ctx, errInvalidBucketName, args.BucketName) } // Check if the user indeed has GetObject access, // if not we do not need to generate presigned URLs if !globalIAMSys.IsAllowed(iampolicy.Args{ AccountName: claims.AccessKey, Action: iampolicy.GetObjectAction, BucketName: args.BucketName, ConditionValues: getConditionValues(r, "", claims.AccessKey, claims.Map()), IsOwner: owner, ObjectName: args.ObjectName, Claims: claims.Map(), }) { return toJSONError(ctx, errPresignedNotAllowed) } reply.UIVersion = Version reply.URL = presignedGet(args.HostName, args.BucketName, args.ObjectName, args.Expiry, creds, region) return nil } // Returns presigned url for GET method. func presignedGet(host, bucket, object string, expiry int64, creds auth.Credentials, region string) string { accessKey := creds.AccessKey secretKey := creds.SecretKey sessionToken := creds.SessionToken date := UTCNow() dateStr := date.Format(iso8601Format) credential := fmt.Sprintf("%s/%s", accessKey, getScope(date, region)) var expiryStr = "604800" // Default set to be expire in 7days. if expiry < 604800 && expiry > 0 { expiryStr = strconv.FormatInt(expiry, 10) } query := url.Values{} query.Set(xhttp.AmzAlgorithm, signV4Algorithm) query.Set(xhttp.AmzCredential, credential) query.Set(xhttp.AmzDate, dateStr) query.Set(xhttp.AmzExpires, expiryStr) query.Set(xhttp.ContentDisposition, fmt.Sprintf("attachment; filename=\"%s\"", object)) // Set session token if available. if sessionToken != "" { query.Set(xhttp.AmzSecurityToken, sessionToken) } query.Set(xhttp.AmzSignedHeaders, "host") queryStr := s3utils.QueryEncode(query) path := SlashSeparator + path.Join(bucket, object) // "host" is the only header required to be signed for Presigned URLs. extractedSignedHeaders := make(http.Header) extractedSignedHeaders.Set("host", host) canonicalRequest := getCanonicalRequest(extractedSignedHeaders, unsignedPayload, queryStr, path, http.MethodGet) stringToSign := getStringToSign(canonicalRequest, date, getScope(date, region)) signingKey := getSigningKey(secretKey, date, region, serviceS3) signature := getSignature(signingKey, stringToSign) return host + s3utils.EncodePath(path) + "?" + queryStr + "&" + xhttp.AmzSignature + "=" + signature } // DiscoveryDocResp - OpenID discovery document reply. type DiscoveryDocResp struct { DiscoveryDoc openid.DiscoveryDoc UIVersion string `json:"uiVersion"` ClientID string `json:"clientId"` } // GetDiscoveryDoc - returns parsed value of OpenID discovery document func (web *webAPIHandlers) GetDiscoveryDoc(r *http.Request, args *WebGenericArgs, reply *DiscoveryDocResp) error { if globalOpenIDConfig.DiscoveryDoc.AuthEndpoint != "" { reply.DiscoveryDoc = globalOpenIDConfig.DiscoveryDoc reply.ClientID = globalOpenIDConfig.ClientID } reply.UIVersion = Version return nil } // LoginSTSArgs - login arguments. type LoginSTSArgs struct { Token string `json:"token" form:"token"` } var errSTSNotInitialized = errors.New("STS API not initialized, please configure STS support") // LoginSTS - STS user login handler. 
func (web *webAPIHandlers) LoginSTS(r *http.Request, args *LoginSTSArgs, reply *LoginRep) error {
	ctx := newWebContext(r, args, "WebLoginSTS")

	if globalOpenIDValidators == nil {
		return toJSONError(ctx, errSTSNotInitialized)
	}

	v, err := globalOpenIDValidators.Get("jwt")
	if err != nil {
		logger.LogIf(ctx, err)
		return toJSONError(ctx, errSTSNotInitialized)
	}

	m, err := v.Validate(args.Token, "")
	if err != nil {
		return toJSONError(ctx, err)
	}

	// The JWT may carry a custom claim with the policy value set.
	// This is a MinIO STS API-specific value; it should be set and
	// configured on your identity provider as part of the JWT custom
	// claims.
	var policyName string
	policySet, ok := iampolicy.GetPoliciesFromClaims(m, iamPolicyClaimNameOpenID())
	if ok {
		policyName = globalIAMSys.CurrentPolicies(strings.Join(policySet.ToSlice(), ","))
	}
	if policyName == "" && globalPolicyOPA == nil {
		return toJSONError(ctx, fmt.Errorf("%s claim missing from the JWT token, credentials will not be generated", iamPolicyClaimNameOpenID()))
	}
	m[iamPolicyClaimNameOpenID()] = policyName

	secret := globalActiveCred.SecretKey
	cred, err := auth.GetNewCredentialsWithMetadata(m, secret)
	if err != nil {
		return toJSONError(ctx, err)
	}

	// Set the newly generated credentials.
	if err = globalIAMSys.SetTempUser(cred.AccessKey, cred, policyName); err != nil {
		return toJSONError(ctx, err)
	}

	// Notify all other MinIO peers to reload temp users
	for _, nerr := range globalNotificationSys.LoadUser(cred.AccessKey, true) {
		if nerr.Err != nil {
			logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
			logger.LogIf(ctx, nerr.Err)
		}
	}

	reply.Token = cred.SessionToken
	reply.UIVersion = Version
	return nil
}

// toJSONError converts regular errors into more user-friendly
// and consumable error messages for the browser UI.
func toJSONError(ctx context.Context, err error, params ...string) (jerr *json2.Error) {
	apiErr := toWebAPIError(ctx, err)
	jerr = &json2.Error{
		Message: apiErr.Description,
	}
	switch apiErr.Code {
	// Reserved bucket name provided.
	case "AllAccessDisabled":
		if len(params) > 0 {
			jerr = &json2.Error{
				Message: fmt.Sprintf("All access to this bucket %s has been disabled.", params[0]),
			}
		}
	// Bucket name invalid with custom error message.
	case "InvalidBucketName":
		if len(params) > 0 {
			jerr = &json2.Error{
				Message: fmt.Sprintf("Bucket Name %s is invalid. Lowercase letters, period, hyphen, numerals are the only allowed characters and should be minimum 3 characters in length.", params[0]),
			}
		}
	// Bucket not found custom error message.
	case "NoSuchBucket":
		if len(params) > 0 {
			jerr = &json2.Error{
				Message: fmt.Sprintf("The specified bucket %s does not exist.", params[0]),
			}
		}
	// Object not found custom error message.
	case "NoSuchKey":
		if len(params) > 1 {
			jerr = &json2.Error{
				Message: fmt.Sprintf("The specified key %s does not exist", params[1]),
			}
		}
		// Add more custom error messages here with more context.
	}
	return jerr
}

// toWebAPIError - converts an error into an APIError.
func toWebAPIError(ctx context.Context, err error) APIError { switch err { case errNoAuthToken: return APIError{ Code: "WebTokenMissing", HTTPStatusCode: http.StatusBadRequest, Description: err.Error(), } case errSTSNotInitialized: return APIError(stsErrCodes.ToSTSErr(ErrSTSNotInitialized)) case errServerNotInitialized: return APIError{ Code: "XMinioServerNotInitialized", HTTPStatusCode: http.StatusServiceUnavailable, Description: err.Error(), } case errAuthentication, auth.ErrInvalidAccessKeyLength, auth.ErrInvalidSecretKeyLength, errInvalidAccessKeyID, errAccessDenied, errLockedObject: return APIError{ Code: "AccessDenied", HTTPStatusCode: http.StatusForbidden, Description: err.Error(), } case errSizeUnspecified: return APIError{ Code: "InvalidRequest", HTTPStatusCode: http.StatusBadRequest, Description: err.Error(), } case errChangeCredNotAllowed: return APIError{ Code: "MethodNotAllowed", HTTPStatusCode: http.StatusMethodNotAllowed, Description: err.Error(), } case errInvalidBucketName: return APIError{ Code: "InvalidBucketName", HTTPStatusCode: http.StatusBadRequest, Description: err.Error(), } case errInvalidArgument: return APIError{ Code: "InvalidArgument", HTTPStatusCode: http.StatusBadRequest, Description: err.Error(), } case errEncryptedObject: return getAPIError(ErrSSEEncryptedObject) case errInvalidEncryptionParameters: return getAPIError(ErrInvalidEncryptionParameters) case errObjectTampered: return getAPIError(ErrObjectTampered) case errMethodNotAllowed: return getAPIError(ErrMethodNotAllowed) } // Convert error type to api error code. switch err.(type) { case StorageFull: return getAPIError(ErrStorageFull) case BucketQuotaExceeded: return getAPIError(ErrAdminBucketQuotaExceeded) case BucketNotFound: return getAPIError(ErrNoSuchBucket) case BucketNotEmpty: return getAPIError(ErrBucketNotEmpty) case BucketExists: return getAPIError(ErrBucketAlreadyOwnedByYou) case BucketNameInvalid: return getAPIError(ErrInvalidBucketName) case hash.BadDigest: return getAPIError(ErrBadDigest) case IncompleteBody: return getAPIError(ErrIncompleteBody) case ObjectExistsAsDirectory: return getAPIError(ErrObjectExistsAsDirectory) case ObjectNotFound: return getAPIError(ErrNoSuchKey) case ObjectNameInvalid: return getAPIError(ErrNoSuchKey) case InsufficientWriteQuorum: return getAPIError(ErrWriteQuorum) case InsufficientReadQuorum: return getAPIError(ErrReadQuorum) case NotImplemented: return APIError{ Code: "NotImplemented", HTTPStatusCode: http.StatusBadRequest, Description: "Functionality not implemented", } } // Log unexpected and unhandled errors. logger.LogIf(ctx, err) return toAPIError(ctx, err) } // writeWebErrorResponse - set HTTP status code and write error description to the body. func writeWebErrorResponse(w http.ResponseWriter, err error)
{ reqInfo := &logger.ReqInfo{ DeploymentID: globalDeploymentID, } ctx := logger.SetReqInfo(GlobalContext, reqInfo) apiErr := toWebAPIError(ctx, err) w.WriteHeader(apiErr.HTTPStatusCode) w.Write([]byte(apiErr.Description)) }
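// Hedged sketch (not part of web-handlers.go above): demonstrates the JSON body
// shape that the DownloadZip handler decodes, matching its documented example
// '{"bucketname":"testbucket","prefix":"john/pics/","objects":[...]}'. The
// package-main scaffolding and the sample values are illustrative only.
package main

import (
	"encoding/json"
	"fmt"
)

// DownloadZipArgs mirrors the handler's request type.
type DownloadZipArgs struct {
	Objects    []string `json:"objects"`    // can be files or sub-directories
	Prefix     string   `json:"prefix"`     // current directory in the browser-ui
	BucketName string   `json:"bucketname"` // bucket name
}

func main() {
	args := DownloadZipArgs{
		Objects:    []string{"hawaii/", "maldives/", "sanjose.jpg"},
		Prefix:     "john/pics/",
		BucketName: "testbucket",
	}
	body, _ := json.Marshal(args)
	// Prints: {"objects":["hawaii/","maldives/","sanjose.jpg"],"prefix":"john/pics/","bucketname":"testbucket"}
	fmt.Println(string(body))
}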
parallel-actions.js
const microTasks = require('../src') microTasks.methodRegister('print', (message, time = 0) => { console.log('Start: ' + message) return new Promise((resolve) => { setTimeout(() => { console.log('End: ' + message) resolve()
microTasks.taskRun([ { method: 'print', params: 'Action 1' }, { parallel: true, actions: [ { method: 'print', params: ['Action 2.1', 1000] }, { method: 'print', params: ['Action 2.2', 3000] }, { method: 'print', params: ['Action 2.3', 2000] } ] }, { method: 'print', params: 'Action 3' } ])
}, time) }) })
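// Hedged note on the example above (assumes the microTasks runner awaits each
// top-level action and runs `parallel: true` groups concurrently): 'Action 1'
// completes first, then 2.1/2.2/2.3 all start together and finish in timeout
// order ('Action 2.1' at ~1000ms, 'Action 2.3' at ~2000ms, 'Action 2.2' at
// ~3000ms), and only then does 'Action 3' run.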
indexer.py
# This is an edited version of https://github.com/minhptx/iswc-2016-semantic-labeling, adapted for use as a baseline for Tab2KG (https://github.com/sgottsch/Tab2KG).

import logging

from elasticsearch.exceptions import RequestError
from elasticsearch.helpers import scan, bulk

from lib.utils import get_index_name

__author__ = "minh"


class Indexer:
    def __init__(self, es):
        self.es = es

    def init_analyzers(self, index_config):
print("init_analyzers") print(index_config) print(get_index_name(index_config)) if(self.es.indices.exists(get_index_name(index_config))): self.es.indices.delete(index=get_index_name(index_config)) self.es.indices.create(index=get_index_name(index_config)) def index_column(self, column, source_name, index_config): body = column.to_json() body['source'] = source_name try: self.es.index(index=get_index_name(index_config), doc_type="service", body=body) return True except RequestError: print("Error") return False def index_source(self, source, index_config): # self.es.indices.put_mapping(index=get_index_name(index_config), doc_type="service", body={ # "service": { # "properties": { # "source": { # "type": "string", # "index": "not_analyzed" # } # } # } # }) for column in source.column_map.values(): if column.semantic_type: if len(column.value_list) > 0: successful = self.index_column(column, source.index_name, index_config) if(not successful): return False else: logging.warning("Indexer: IGNORE COLUMN `%s` in source `%s` because of empty values", column.name, source.name) return True def delete_column(self, attr_name, source_name, index_config): bulk_deletes = [] for result in scan(self.es, query={ "query": { "match": { "name": attr_name, } } }, index=get_index_name(index_config), doc_type="service", _source=False, track_scores=False, scroll='5m'): result['_op_type'] = 'delete' bulk_deletes.append(result) bulk(self.es, bulk_deletes)
0032_auto_20181112_2011.py
# Generated by Django 2.1 on 2018-11-13 01:11 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('hsse_api', '0031_auto_20181111_1424'), ] operations = [ migrations.AlterField( model_name='auditinspection', name='created_on', field=models.DateField(auto_now_add=True), ), migrations.AlterField( model_name='employeecommunityactivity', name='created_on', field=models.DateField(auto_now_add=True), ),
migrations.AlterField( model_name='environmentalindicator', name='created_on', field=models.DateField(auto_now_add=True), ), migrations.AlterField( model_name='monthlyreport', name='created_on', field=models.DateField(auto_now_add=True), ), migrations.AlterField( model_name='report', name='created_on', field=models.DateField(auto_now_add=True), ), migrations.AlterField( model_name='safetyactivity', name='created_on', field=models.DateField(auto_now_add=True), ), migrations.AlterField( model_name='site', name='created_on', field=models.DateField(auto_now_add=True), ), migrations.AlterField( model_name='user', name='created_on', field=models.DateField(auto_now_add=True), ), ]
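# Note on DateField(auto_now_add=True): Django sets the field exactly once, when
# the row is first created, and marks it non-editable; this AlterField changes
# the schema/validation behavior but does not rewrite existing rows' values.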
win32.py
# win32.py - utility functions that use win32 API # # Copyright 2005-2009 Matt Mackall <[email protected]> and others # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. """Utility functions that use win32 API. Mark Hammond's win32all package allows better functionality on Windows. This module overrides definitions in util.py. If not available, import of this module will fail, and generic code will be used. """ import win32api import errno, os, sys, pywintypes, win32con, win32file, win32process import winerror, win32gui import osutil, encoding from win32com.shell import shell, shellcon def os_link(src, dst): try: win32file.CreateHardLink(dst, src) # CreateHardLink sometimes succeeds on mapped drives but # following nlinks() returns 1. Check it now and bail out. if nlinks(src) < 2: try: win32file.DeleteFile(dst) except: pass # Fake hardlinking error raise OSError(errno.EINVAL, 'Hardlinking not supported') except pywintypes.error, details: raise OSError(errno.EINVAL, 'target implements hardlinks improperly') except NotImplementedError: # Another fake error win Win98 raise OSError(errno.EINVAL, 'Hardlinking not supported') def _getfileinfo(pathname): """Return number of hardlinks for the given file.""" try: fh = win32file.CreateFile(pathname, win32file.GENERIC_READ, win32file.FILE_SHARE_READ, None, win32file.OPEN_EXISTING, 0, None) try: return win32file.GetFileInformationByHandle(fh) finally: fh.Close() except pywintypes.error: return None def nlinks(pathname): """Return number of hardlinks for the given file.""" res = _getfileinfo(pathname) if res is not None: return res[7] else: return os.lstat(pathname).st_nlink def samefile(fpath1, fpath2): """Returns whether fpath1 and fpath2 refer to the same file. This is only guaranteed to work for files, not directories.""" res1 = _getfileinfo(fpath1) res2 = _getfileinfo(fpath2) if res1 is not None and res2 is not None: # Index 4 is the volume serial number, and 8 and 9 contain the file ID return res1[4] == res2[4] and res1[8] == res2[8] and res1[9] == res2[9] else: return False def samedevice(fpath1, fpath2): """Returns whether fpath1 and fpath2 are on the same device. This is only guaranteed to work for files, not directories.""" res1 = _getfileinfo(fpath1) res2 = _getfileinfo(fpath2) if res1 is not None and res2 is not None: return res1[4] == res2[4] else: return False def testpid(pid): '''return True if pid is still running or unable to determine, False otherwise''' try: handle = win32api.OpenProcess( win32con.PROCESS_QUERY_INFORMATION, False, pid) if handle: status = win32process.GetExitCodeProcess(handle) return status == win32con.STILL_ACTIVE except pywintypes.error, details: return details[0] != winerror.ERROR_INVALID_PARAMETER return True def lookup_reg(key, valname=None, scope=None): ''' Look up a key/value name in the Windows registry. valname: value name. If unspecified, the default value for the key is used. scope: optionally specify scope for registry lookup, this can be a sequence of scopes to look up in order. Default (CURRENT_USER, LOCAL_MACHINE). 
''' try: from _winreg import HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE, \ QueryValueEx, OpenKey except ImportError: return None if scope is None: scope = (HKEY_CURRENT_USER, HKEY_LOCAL_MACHINE) elif not isinstance(scope, (list, tuple)): scope = (scope,) for s in scope: try: val = QueryValueEx(OpenKey(s, key), valname)[0] # never let a Unicode string escape into the wild return encoding.tolocal(val.encode('UTF-8')) except EnvironmentError: pass def system_rcpath_win32(): '''return default os-specific hgrc search path''' proc = win32api.GetCurrentProcess() try: # This will fail on windows < NT filename = win32process.GetModuleFileNameEx(proc, 0) except: filename = win32api.GetModuleFileName(0) # Use mercurial.ini found in directory with hg.exe progrc = os.path.join(os.path.dirname(filename), 'mercurial.ini') if os.path.isfile(progrc): return [progrc] # Use hgrc.d found in directory with hg.exe progrcd = os.path.join(os.path.dirname(filename), 'hgrc.d') if os.path.isdir(progrcd): rcpath = [] for f, kind in osutil.listdir(progrcd): if f.endswith('.rc'): rcpath.append(os.path.join(progrcd, f)) return rcpath # else look for a system rcpath in the registry try: value = win32api.RegQueryValue( win32con.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Mercurial') rcpath = [] for p in value.split(os.pathsep): if p.lower().endswith('mercurial.ini'): rcpath.append(p) elif os.path.isdir(p): for f, kind in osutil.listdir(p): if f.endswith('.rc'): rcpath.append(os.path.join(p, f)) return rcpath except pywintypes.error: return [] def user_rcpath_win32(): '''return os-specific hgrc search path to the user dir''' userdir = os.path.expanduser('~') if sys.getwindowsversion()[3] != 2 and userdir == '~': # We are on win < nt: fetch the APPDATA directory location and use # the parent directory as the user home dir. appdir = shell.SHGetPathFromIDList( shell.SHGetSpecialFolderLocation(0, shellcon.CSIDL_APPDATA)) userdir = os.path.dirname(appdir) return [os.path.join(userdir, 'mercurial.ini'), os.path.join(userdir, '.hgrc')] def getuser(): '''return name of current user''' return win32api.GetUserName() def
(): """Register a termination handler for console events including CTRL+C. python signal handlers do not work well with socket operations. """ def handler(event): win32process.ExitProcess(1) win32api.SetConsoleCtrlHandler(handler) def hidewindow(): def callback(*args, **kwargs): hwnd, pid = args wpid = win32process.GetWindowThreadProcessId(hwnd)[1] if pid == wpid: win32gui.ShowWindow(hwnd, win32con.SW_HIDE) pid = win32process.GetCurrentProcessId() win32gui.EnumWindows(callback, pid)
set_signal_handler_win32
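# Hedged usage sketch for lookup_reg above (the key and value name are
# illustrative only):
#
#   path = lookup_reg(r'Software\Mercurial', None)  # default value of the key
#
# Per its docstring, this searches HKEY_CURRENT_USER first, then
# HKEY_LOCAL_MACHINE, and returns the value re-encoded to the local encoding,
# or None if the key is absent.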
compiler.ts
interface Action { type: string; payload: Record<string, any>; } export const compilerInitialState = { compiler: { mode: '',
editor: { mode: '' } } export const compilerReducer = (state = compilerInitialState, action: Action) => { switch (action.type) { case 'SET_COMPILER_MODE': { return { ...state, compiler: { ...state.compiler, mode: action.payload.mode, args: action.payload.args || null } } } case 'RESET_COMPILER_MODE': { return { ...state, compiler: { ...state.compiler, mode: '', args: null } } } case 'SET_EDITOR_MODE': { return { ...state, editor: { ...state.editor, mode: action.payload } } } case 'RESET_EDITOR_MODE': { return { ...state, editor: { ...state.editor, mode: '' } } } default: throw new Error() } }
args: null },
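// Hedged usage sketch for compilerReducer above (action shapes follow the cases
// in the switch; 'optimize' and its args are illustrative values only):
const s1 = compilerReducer(compilerInitialState, {
  type: 'SET_COMPILER_MODE',
  payload: { mode: 'optimize', args: { level: 2 } }
})
// s1.compiler is { mode: 'optimize', args: { level: 2 } }
const s2 = compilerReducer(s1, { type: 'RESET_COMPILER_MODE', payload: {} })
// s2.compiler is { mode: '', args: null }; unknown action types throw.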
index.js
/* eslint-disable no-undef */ /* eslint-disable no-console */ let net = require('net'); let socks = require('./socks'); let server = net.createServer(); function info(tag, msg) { console.log(`${tag}:${JSON.stringify(msg)}`); } server.on('connection', client => { let state = 1; let remote1 = null; //let remote2 = null; let msg = null; //client.setTimeout(0); client.on('data', buffer => { switch (state) { case 1: msg = socks.versionMethod(buffer); info('received version method', msg); client.write(socks.versionMethodReply()); state = 2; break; case 2: msg = socks.requestDetail(buffer); info('received client request',msg); remote1 = net.createConnection(msg.DST_PORT,msg.DST_ADDR); remote1.on('connect',()=>{ console.log(`connect to ${msg.DST_ADDR}:${msg.DST_PORT} successfully`); console.log('proxy address:' + remote1.localAddress + ':' + remote1.localPort); let localAddress = remote1.localAddress.split('.').reduce((a,b)=>{ a.push(parseInt(b)); return a; },[]); let localPort = remote1.localPort; // let remoteAddress = remote1.remoteAddress.split('.').reduce((a,b)=>{ // a.push(parseInt(b)); // return a; // },[]); // let remotePort = remote1.remotePort; switch(msg.CMD){ case 1: //client.write(socks.replyRequest(0,1,localAddress,localPort)); client.write(socks.replyRequest(0,1,localAddress,localPort)); break; case 3:
break; case 4: break; } state = 3; }); remote1.on('data',data=>{ //console.log('received:'+ data.byteLength); client.write(data); }); remote1.on('error',(error)=>{ client.end(); console.log(`connect to ${msg.DST_ADDR}:${msg.DST_PORT} failed. \n${error}`); }); remote1.on('end',()=>{ client.end(); console.log(`connect to ${msg.DST_ADDR}:${msg.DST_PORT} end.`); }); break; case 3: remote1.write(buffer); //console.log('sent:' + buffer.byteLength); break; } }); client.on('end', () => { remote1.end(); console.log('received FIN packet'); }); client.on('timeout',()=>{ console.log('client is timeout'); }); client.on('error',(error)=>{ remote1.end(); console.log('client error:'+ error); }); }); server.on('close',()=>{ console.log('server closed'); }); server.on('error', (error) => { console.log(error.stack); }); server.listen(parseInt(process.argv[2]), () => { console.log(`server started on ${server.address().port}`); });
//connection.write(socks.replyRequest(0,msg.ATYP,remoteAddress,remotePort));
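// Hedged note on the state machine above: state 1 handles the SOCKS5
// version/method greeting, state 2 handles the client's request (only CONNECT,
// CMD 1, is implemented; BIND 3 and UDP ASSOCIATE 4 are left as stubs), and
// state 3 simply pipes raw bytes between the client and the remote socket.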
containerd.go
/*
Copyright 2019 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cruntime

import (
	"bytes"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"net/url"
	"os"
	"os/exec"
	"path"
	"strings"
	"text/template"
	"time"

	"github.com/blang/semver"
	"github.com/pkg/errors"
	"k8s.io/klog/v2"
	"k8s.io/minikube/pkg/minikube/assets"
	"k8s.io/minikube/pkg/minikube/bootstrapper/images"
	"k8s.io/minikube/pkg/minikube/cni"
	"k8s.io/minikube/pkg/minikube/command"
	"k8s.io/minikube/pkg/minikube/config"
	"k8s.io/minikube/pkg/minikube/download"
	"k8s.io/minikube/pkg/minikube/style"
	"k8s.io/minikube/pkg/minikube/sysinit"
)

const (
	containerdNamespaceRoot = "/run/containerd/runc/k8s.io"
	// containerdConfigFile is the path to the containerd configuration
	containerdConfigFile     = "/etc/containerd/config.toml"
	containerdConfigTemplate = `root = "/var/lib/containerd"
state = "/run/containerd"
oom_score = 0

[grpc]
  address = "/run/containerd/containerd.sock"
  uid = 0
  gid = 0
  max_recv_message_size = 16777216
  max_send_message_size = 16777216

[debug]
  address = ""
  uid = 0
  gid = 0
  level = ""

[metrics]
  address = ""
  grpc_histogram = false

[cgroup]
  path = ""

[plugins]
  [plugins.cri]
    stream_server_address = ""
    stream_server_port = "10010"
    enable_selinux = false
    sandbox_image = "{{ .PodInfraContainerImage }}"
    stats_collect_period = 10
    systemd_cgroup = {{ .SystemdCgroup }}
    enable_tls_streaming = false
    max_container_log_line_size = 16384
    [plugins.cri.containerd]
      snapshotter = "overlayfs"
      no_pivot = true
      [plugins.cri.containerd.default_runtime]
        runtime_type = "io.containerd.runtime.v1.linux"
        runtime_engine = ""
        runtime_root = ""
      [plugins.cri.containerd.untrusted_workload_runtime]
        runtime_type = ""
        runtime_engine = ""
        runtime_root = ""
    [plugins.cri.cni]
      bin_dir = "/opt/cni/bin"
      conf_dir = "{{.CNIConfDir}}"
      conf_template = ""
    [plugins.cri.registry]
      [plugins.cri.registry.mirrors]
        [plugins.cri.registry.mirrors."docker.io"]
          endpoint = ["https://registry-1.docker.io"]
        {{ range .InsecureRegistry -}}
        [plugins.cri.registry.mirrors."{{. -}}"]
          endpoint = ["http://{{. -}}"]
        {{ end -}}
  [plugins.diff-service]
    default = ["walking"]
  [plugins.linux]
    shim = "containerd-shim"
    runtime = "runc"
    runtime_root = ""
    no_shim = false
    shim_debug = false
  [plugins.scheduler]
    pause_threshold = 0.02
    deletion_threshold = 0
    mutation_threshold = 100
    schedule_delay = "0s"
    startup_delay = "100ms"
`
)

// Containerd contains containerd runtime state
type Containerd struct {
	Socket            string
	Runner            CommandRunner
	ImageRepository   string
	KubernetesVersion semver.Version
	Init              sysinit.Manager
	InsecureRegistry  []string
}

// Name is a human-readable name for containerd
func (r *Containerd) Name() string {
	return "containerd"
}

// Style is the console style for containerd
func (r *Containerd) Style() style.Enum {
	return style.Containerd
}

// Version retrieves the current version of this runtime
func (r *Containerd) Version() (string, error) {
	c := exec.Command("containerd", "--version")
	rr, err := r.Runner.RunCmd(c)
	if err != nil {
		return "", errors.Wrapf(err, "check containerd version.")
	}
	// containerd github.com/containerd/containerd v1.2.0 c4446665cb9c30056f4998ed953e6d4ff22c7c39
	words := strings.Split(rr.Stdout.String(), " ")
	if len(words) >= 4 && words[0] == "containerd" {
		return strings.Replace(words[2], "v", "", 1), nil
	}
	return "", fmt.Errorf("unknown version: %q", rr.Stdout.String())
}

// SocketPath returns the path to the socket file for containerd
func (r *Containerd) SocketPath() string {
	if r.Socket != "" {
		return r.Socket
	}
	return "/run/containerd/containerd.sock"
}

// Active returns whether containerd is active on the host
func (r *Containerd) Active() bool {
	return r.Init.Active("containerd")
}

// Available returns an error if it is not possible to use this runtime on a host
func (r *Containerd) Available() error {
	c := exec.Command("which", "containerd")
	if _, err := r.Runner.RunCmd(c); err != nil {
		return errors.Wrap(err, "check containerd availability.")
	}
	return nil
}

// generateContainerdConfig sets up /etc/containerd/config.toml
func generateContainerdConfig(cr CommandRunner, imageRepository string, kv semver.Version, forceSystemd bool, insecureRegistry []string) error {
	cPath := containerdConfigFile
	t, err := template.New("containerd.config.toml").Parse(containerdConfigTemplate)
	if err != nil {
		return err
	}
	pauseImage := images.Pause(kv, imageRepository)
	opts := struct {
		PodInfraContainerImage string
		SystemdCgroup          bool
		InsecureRegistry       []string
		CNIConfDir             string
	}{
		PodInfraContainerImage: pauseImage,
		SystemdCgroup:          forceSystemd,
		InsecureRegistry:       insecureRegistry,
		CNIConfDir:             cni.ConfDir,
	}
	var b bytes.Buffer
	if err := t.Execute(&b, opts); err != nil {
		return err
	}
	c := exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo mkdir -p %s && printf %%s \"%s\" | base64 -d | sudo tee %s", path.Dir(cPath), base64.StdEncoding.EncodeToString(b.Bytes()), cPath))
	if _, err := cr.RunCmd(c); err != nil {
		return errors.Wrap(err, "generate containerd cfg.")
	}
	return nil
}

// Enable idempotently enables containerd on a host
func (r *Containerd) Enable(disOthers, forceSystemd bool) error {
	if disOthers {
		if err := disableOthers(r, r.Runner); err != nil {
			klog.Warningf("disableOthers: %v", err)
		}
	}
	if err := populateCRIConfig(r.Runner, r.SocketPath()); err != nil {
		return err
	}
	if err := generateContainerdConfig(r.Runner, r.ImageRepository, r.KubernetesVersion, forceSystemd, r.InsecureRegistry); err != nil {
		return err
	}
	if err := enableIPForwarding(r.Runner); err != nil {
		return err
	}

	// Otherwise, containerd will fail API requests with 'Unimplemented'
	return r.Init.Restart("containerd")
}

// Disable idempotently disables containerd on a host
func (r *Containerd) Disable() error {
	return r.Init.ForceStop("containerd")
}

// ImageExists checks if an image with the given name and sha exists in this runtime
func (r *Containerd) ImageExists(name string, sha string) bool {
	c := exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo ctr -n=k8s.io images check | grep %s | grep %s", name, sha))
	if _, err := r.Runner.RunCmd(c); err != nil {
		return false
	}
	return true
}

// ListImages lists images managed by this container runtime
func (r *Containerd) ListImages(ListImagesOptions) ([]string, error) {
	c := exec.Command("sudo", "ctr", "-n=k8s.io", "images", "list", "--quiet")
	rr, err := r.Runner.RunCmd(c)
	if err != nil {
		return nil, errors.Wrapf(err, "ctr images list")
	}
	all := strings.Split(rr.Stdout.String(), "\n")
	imgs := []string{}
	for _, img := range all {
		if img == "" || strings.Contains(img, "sha256:") {
			continue
		}
		imgs = append(imgs, img)
	}
	return imgs, nil
}

// LoadImage loads an image into this runtime
func (r *Containerd) LoadImage(path string) error {
	klog.Infof("Loading image: %s", path)
	c := exec.Command("sudo", "ctr", "-n=k8s.io", "images", "import", path)
	if _, err := r.Runner.RunCmd(c); err != nil {
		return errors.Wrapf(err, "ctr images import")
	}
	return nil
}

// PullImage pulls an image into this runtime
func (r *Containerd) PullImage(name string) error {
	return pullCRIImage(r.Runner, name)
}

// SaveImage saves an image from this runtime
func (r *Containerd) SaveImage(name string, path string) error {
	klog.Infof("Saving image %s: %s", name, path)
	c := exec.Command("sudo", "ctr", "-n=k8s.io", "images", "export", path, name)
	if _, err := r.Runner.RunCmd(c); err != nil {
		return errors.Wrapf(err, "ctr images export")
	}
	return nil
}

// RemoveImage removes an image
func (r *Containerd) RemoveImage(name string) error {
	return removeCRIImage(r.Runner, name)
}

func gitClone(cr CommandRunner, src string) (string, error) {
	// clone to a temporary directory
	rr, err := cr.RunCmd(exec.Command("mktemp", "-d"))
	if err != nil {
		return "", err
	}
	tmp := strings.TrimSpace(rr.Stdout.String())
	cmd := exec.Command("git", "clone", src, tmp)
	if _, err := cr.RunCmd(cmd); err != nil {
		return "", err
	}
	return tmp, nil
}

func downloadRemote(cr CommandRunner, src string) (string, error) {
	u, err := url.Parse(src)
	if err != nil {
		return "", err
	}
	if u.Scheme == "" && u.Host == "" {
		// regular file, return
		return src, nil
	}
	if u.Scheme == "git" {
		return gitClone(cr, src)
	}

	// download to a temporary file
	rr, err := cr.RunCmd(exec.Command("mktemp"))
	if err != nil {
		return "", err
	}
	dst := strings.TrimSpace(rr.Stdout.String())
	cmd := exec.Command("curl", "-L", "-o", dst, src)
	if _, err := cr.RunCmd(cmd); err != nil {
		return "", err
	}

	// extract to a temporary directory
	rr, err = cr.RunCmd(exec.Command("mktemp", "-d"))
	if err != nil {
		return "", err
	}
	tmp := strings.TrimSpace(rr.Stdout.String())
	cmd = exec.Command("tar", "-C", tmp, "-xf", dst)
	if _, err := cr.RunCmd(cmd); err != nil {
		return "", err
	}

	return tmp, nil
}

// BuildImage builds an image into this runtime
func (r *Containerd) BuildImage(src string, file string, tag string, push bool, env []string, opts []string) error {
	// download url if not already present
	dir, err := downloadRemote(r.Runner, src)
	if err != nil {
		return err
	}
	if file != "" {
		if dir != src {
			file = path.Join(dir, file)
		}
		// copy to standard path for Dockerfile
		df := path.Join(dir, "Dockerfile")
		if file != df {
			cmd := exec.Command("sudo", "cp", "-f", file, df)
			if _, err := r.Runner.RunCmd(cmd); err != nil {
				return err
			}
		}
	}
	klog.Infof("Building image: %s", dir)
	extra := ""
	if tag != "" {
		// add default tag if missing
		if !strings.Contains(tag, ":") {
			tag += ":latest"
		}
		extra = fmt.Sprintf(",name=%s", tag)
		if push {
			extra += ",push=true"
		}
	}
	args := []string{"buildctl", "build",
		"--frontend", "dockerfile.v0",
		"--local", fmt.Sprintf("context=%s", dir),
		"--local", fmt.Sprintf("dockerfile=%s", dir),
		"--output", fmt.Sprintf("type=image%s", extra)}
	for _, opt := range opts {
		args = append(args, "--"+opt)
	}
	c := exec.Command("sudo", args...)
	e := os.Environ()
	e = append(e, env...)
	c.Env = e
	c.Stdout = os.Stdout
	c.Stderr = os.Stderr
	if _, err := r.Runner.RunCmd(c); err != nil {
		return errors.Wrap(err, "buildctl build.")
	}
	return nil
}

// CGroupDriver returns the cgroup driver ("cgroupfs" or "systemd")
func (r *Containerd) CGroupDriver() (string, error) {
	info, err := getCRIInfo(r.Runner)
	if err != nil {
		return "", err
	}
	if info["config"] == nil {
		// err is nil here, so build a fresh error instead of wrapping nil
		return "", errors.New("missing config")
	}
	config, ok := info["config"].(map[string]interface{})
	if !ok {
		return "", errors.New("config not map")
	}
	cgroupManager := "cgroupfs" // default
	switch config["systemdCgroup"] {
	case false:
		cgroupManager = "cgroupfs"
	case true:
		cgroupManager = "systemd"
	}
	return cgroupManager, nil
}

// KubeletOptions returns kubelet options for a containerd runtime
func (r *Containerd) KubeletOptions() map[string]string {
	return map[string]string{
		"container-runtime":          "remote",
		"container-runtime-endpoint": fmt.Sprintf("unix://%s", r.SocketPath()),
		"image-service-endpoint":     fmt.Sprintf("unix://%s", r.SocketPath()),
		"runtime-request-timeout":    "15m",
	}
}

// ListContainers returns a list of containers managed by this container runtime
func (r *Containerd) ListContainers(o ListContainersOptions) ([]string, error) {
	return listCRIContainers(r.Runner, containerdNamespaceRoot, o)
}

// PauseContainers pauses running containers based on ID
func (r *Containerd) PauseContainers(ids []string) error {
	return pauseCRIContainers(r.Runner, containerdNamespaceRoot, ids)
}

// UnpauseContainers unpauses running containers based on ID
func (r *Containerd) UnpauseContainers(ids []string) error {
	return unpauseCRIContainers(r.Runner, containerdNamespaceRoot, ids)
}

// KillContainers removes containers based on ID
func (r *Containerd) KillContainers(ids []string) error {
	return killCRIContainers(r.Runner, ids)
}

// StopContainers stops containers based on ID
func (r *Containerd) StopContainers(ids []string) error {
	return stopCRIContainers(r.Runner, ids)
}

// ContainerLogCmd returns the command to retrieve the log for a container based on ID
func (r *Containerd) ContainerLogCmd(id string, len int, follow bool) string {
	return criContainerLogCmd(r.Runner, id, len, follow)
}

// SystemLogCmd returns the command to retrieve system logs
func (r *Containerd) SystemLogCmd(len int) string {
	return fmt.Sprintf("sudo journalctl -u containerd -n %d", len)
}

// Preload preloads the container runtime with k8s images
func (r *Containerd) Preload(cfg config.KubernetesConfig) error {
	if !download.PreloadExists(cfg.KubernetesVersion, cfg.ContainerRuntime) {
		return nil
	}

	k8sVersion := cfg.KubernetesVersion
	cRuntime := cfg.ContainerRuntime

	// If images already exist, return
	images, err := images.Kubeadm(cfg.ImageRepository, k8sVersion)
	if err != nil {
		return errors.Wrap(err, "getting images")
	}
	if containerdImagesPreloaded(r.Runner, images) {
		klog.Info("Images already preloaded, skipping extraction")
		return nil
	}

	tarballPath := download.TarballPath(k8sVersion, cRuntime)
	targetDir := "/"
	targetName := "preloaded.tar.lz4"
	dest := path.Join(targetDir, targetName)

	c := exec.Command("which", "lz4")
	if _, err := r.Runner.RunCmd(c); err != nil {
		return NewErrISOFeature("lz4")
	}

	// Copy over tarball into host
	fa, err := assets.NewFileAsset(tarballPath, targetDir, targetName, "0644")
	if err != nil {
		return errors.Wrap(err, "getting file asset")
	}
	defer func() {
		if err := fa.Close(); err != nil {
			klog.Warningf("error closing the file %s: %v", fa.GetSourcePath(), err)
		}
	}()

	t := time.Now()
	if err := r.Runner.Copy(fa); err != nil {
		return errors.Wrap(err, "copying file")
	}
	klog.Infof("Took %f seconds to copy over tarball", time.Since(t).Seconds())

	t = time.Now()
	// extract the tarball to /var in the VM
	if rr, err := r.Runner.RunCmd(exec.Command("sudo", "tar", "-I", "lz4", "-C", "/var", "-xf", dest)); err != nil {
		return errors.Wrapf(err, "extracting tarball: %s", rr.Output())
	}
	klog.Infof("Took %f seconds to extract the tarball", time.Since(t).Seconds())

	// remove the tarball in the VM
	if err := r.Runner.Remove(fa); err != nil {
		klog.Infof("error removing tarball: %v", err)
	}

	return r.Restart()
}

// Restart restarts containerd on a host
func (r *Containerd) Restart() error {
	return r.Init.Restart("containerd")
}

// containerdImagesPreloaded returns true if all images have been preloaded
func containerdImagesPreloaded(runner command.Runner, images []string) bool {
	rr, err := runner.RunCmd(exec.Command("sudo", "crictl", "images", "--output", "json"))
	if err != nil {
		return false
	}
	type crictlImages struct {
		Images []struct {
			ID          string      `json:"id"`
			RepoTags    []string    `json:"repoTags"`
			RepoDigests []string    `json:"repoDigests"`
			Size        string      `json:"size"`
			UID         interface{} `json:"uid"`
			Username    string      `json:"username"`
		} `json:"images"`
	}

	var jsonImages crictlImages
	err = json.Unmarshal(rr.Stdout.Bytes(), &jsonImages)
	if err != nil {
		klog.Errorf("failed to unmarshal images, will assume images are not preloaded")
		return false
	}

	// Make sure every expected image is present in the crictl output
	for _, i := range images {
		found := false
		for _, ji := range jsonImages.Images {
			for _, rt := range ji.RepoTags {
				i = addRepoTagToImageName(i)
				if i == rt {
					found = true
					break
				}
			}
			if found {
				break
			}
		}
		if !found {
			klog.Infof("couldn't find preloaded image for %q. assuming images are not preloaded.", i)
			return false
		}
	}
	klog.Infof("all images are preloaded for containerd runtime.")
	return true
}

// ImagesPreloaded returns true if all images have been preloaded
func (r *Containerd) ImagesPreloaded(images []string) bool {
	return containerdImagesPreloaded(r.Runner, images)
}
[plugins.cgroups] no_prometheus = false
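generateContainerdConfig above renders the TOML template with text/template before writing it to disk. The following is a minimal, self-contained sketch of just that rendering step; the trimmed template mirrors the fields of the anonymous options struct, but the concrete values (pause image, registry address) are illustrative stand-ins, not minikube's actual defaults.

package main

import (
	"bytes"
	"fmt"
	"text/template"
)

// a trimmed version of the config template, keeping only the templated fields
const tmpl = `sandbox_image = "{{ .PodInfraContainerImage }}"
systemd_cgroup = {{ .SystemdCgroup }}
conf_dir = "{{ .CNIConfDir }}"
{{ range .InsecureRegistry -}}
[plugins.cri.registry.mirrors."{{. -}}"]
  endpoint = ["http://{{. -}}"]
{{ end -}}
`

func main() {
	// parse the template, as generateContainerdConfig does
	t, err := template.New("containerd.config.toml").Parse(tmpl)
	if err != nil {
		panic(err)
	}
	// the fields mirror the anonymous options struct above;
	// the values here are hypothetical examples
	opts := struct {
		PodInfraContainerImage string
		SystemdCgroup          bool
		InsecureRegistry       []string
		CNIConfDir             string
	}{
		PodInfraContainerImage: "k8s.gcr.io/pause:3.2",
		SystemdCgroup:          true,
		InsecureRegistry:       []string{"10.0.0.5:5000"},
		CNIConfDir:             "/etc/cni/net.d",
	}
	var b bytes.Buffer
	if err := t.Execute(&b, opts); err != nil {
		panic(err)
	}
	fmt.Print(b.String()) // the rendered TOML fragment
}

Running this prints the rendered TOML, which the real code path then base64-encodes and pipes through "base64 -d | sudo tee" to land at /etc/containerd/config.toml without shell-quoting issues.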
rtree.go
// Copyright 2012 Daniel Connelly. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// A library for efficiently storing and querying spatial data.
package rtreego

import (
	"fmt"
	"math"
	"sort"
)

// Dim is the number of dimensions of the bounding boxes and points used by this package.
const Dim = 3

// Rtree represents an R-tree, a balanced search tree for storing and querying
// spatial objects. MinChildren/MaxChildren specify the minimum/maximum branching factors.
type Rtree struct {
	MinChildren int
	MaxChildren int
	root        *node
	size        int
	height      int
}

// NewTree creates a new R-tree instance.
func NewTree(MinChildren, MaxChildren int) *Rtree {
	rt := Rtree{MinChildren: MinChildren, MaxChildren: MaxChildren}
	rt.height = 1
	rt.root = &node{}
	rt.root.entries = make([]entry, 0, MaxChildren)
	rt.root.leaf = true
	rt.root.level = 1
	return &rt
}

// Size returns the number of objects currently stored in the tree.
func (tree *Rtree) Size() int {
	return tree.size
}

func (tree *Rtree) String() string {
	return "(*Rtree)"
}

// Depth returns the maximum depth of the tree.
func (tree *Rtree) Depth() int {
	return tree.height
}

// node represents a tree node of an Rtree.
type node struct {
	parent  *node
	leaf    bool
	entries []entry
	level   int // node depth in the Rtree
}

func (n *node) String() string {
	return fmt.Sprintf("node{leaf: %v, entries: %v}", n.leaf, n.entries)
}

// entry represents a spatial index record stored in a tree node.
type entry struct {
	bb    *Rect // bounding-box of all children of this entry
	child *node
	obj   Spatial
}

func (e entry) String() string {
	if e.child != nil {
		return fmt.Sprintf("entry{bb: %v, child: %v}", e.bb, e.child)
	}
	return fmt.Sprintf("entry{bb: %v, obj: %v}", e.bb, e.obj)
}

// Spatial is the interface implemented by any type that can be stored in an
// Rtree and queried.
type Spatial interface {
	Bounds() *Rect
}

// Insertion

// Insert inserts a spatial object into the tree. If insertion
// causes a leaf node to overflow, the tree is rebalanced automatically.
//
// Implemented per Section 3.2 of "R-trees: A Dynamic Index Structure for
// Spatial Searching" by A. Guttman, Proceedings of ACM SIGMOD, p. 47-57, 1984.
func (tree *Rtree) Insert(obj Spatial) {
	e := entry{obj.Bounds(), nil, obj}
	tree.insert(e, 1)
	tree.size++
}

// insert adds the specified entry to the tree at the specified level.
func (tree *Rtree) insert(e entry, level int) {
	leaf := tree.chooseNode(tree.root, e, level)
	leaf.entries = append(leaf.entries, e)

	// update parent pointer if necessary
	if e.child != nil {
		e.child.parent = leaf
	}

	// split the leaf if it overflows
	var split *node
	if len(leaf.entries) > tree.MaxChildren {
		leaf, split = leaf.split(tree.MinChildren)
	}
	root, splitRoot := tree.adjustTree(leaf, split)
	if splitRoot != nil {
		oldRoot := root
		tree.height++
		tree.root = &node{
			parent: nil,
			level:  tree.height,
			entries: []entry{
				{bb: oldRoot.computeBoundingBox(), child: oldRoot},
				{bb: splitRoot.computeBoundingBox(), child: splitRoot},
			},
		}
		oldRoot.parent = tree.root
		splitRoot.parent = tree.root
	}
}

// chooseNode finds the node at the specified level to which e should be added.
func (tree *Rtree) chooseNode(n *node, e entry, level int) *node {
	if n.leaf || n.level == level {
		return n
	}

	// find the entry whose bb needs the least enlargement to include obj
	diff := math.MaxFloat64
	var chosen entry
	var bb Rect
	for _, en := range n.entries {
		initBoundingBox(&bb, en.bb, e.bb)
		d := bb.size() - en.bb.size()
		if d < diff || (d == diff && en.bb.size() < chosen.bb.size()) {
			diff = d
			chosen = en
		}
	}

	return tree.chooseNode(chosen.child, e, level)
}

// adjustTree splits overflowing nodes and propagates the changes upwards.
func (tree *Rtree) adjustTree(n, nn *node) (*node, *node) {
	// Let the caller handle root adjustments.
	if n == tree.root {
		return n, nn
	}

	// Resize the bounding box of n to account for lower-level changes.
	en := n.getEntry()
	en.bb = n.computeBoundingBox()

	// If nn is nil, then we're just propagating changes upwards.
	if nn == nil {
		return tree.adjustTree(n.parent, nil)
	}

	// Otherwise, these are two nodes resulting from a split.
	// n was reused as the "left" node, but we need to add nn to n.parent.
	enn := entry{nn.computeBoundingBox(), nn, nil}
	n.parent.entries = append(n.parent.entries, enn)

	// If the new entry overflows the parent, split the parent and propagate.
	if len(n.parent.entries) > tree.MaxChildren {
		return tree.adjustTree(n.parent.split(tree.MinChildren))
	}

	// Otherwise keep propagating changes upwards.
	return tree.adjustTree(n.parent, nil)
}

// getEntry returns a pointer to the entry for the node n from n's parent.
func (n *node) getEntry() *entry {
	var e *entry
	for i := range n.parent.entries {
		if n.parent.entries[i].child == n {
			e = &n.parent.entries[i]
			break
		}
	}
	return e
}

// computeBoundingBox finds the MBR of the children of n.
func (n *node) computeBoundingBox() *Rect {
	var bb Rect
	for i, e := range n.entries {
		if i == 0 {
			bb = *e.bb
		} else {
			bb.enlarge(e.bb)
		}
	}
	return &bb
}

// split splits a node into two groups while attempting to minimize the
// bounding-box area of the resulting groups.
func (n *node) split(minGroupSize int) (left, right *node) {
	// find the initial split
	l, r := n.pickSeeds()
	leftSeed, rightSeed := n.entries[l], n.entries[r]

	// get the entries to be divided between left and right
	remaining := append(n.entries[:l], n.entries[l+1:r]...)
	remaining = append(remaining, n.entries[r+1:]...)

	// set up the new split nodes, but reuse n as the left node
	left = n
	left.entries = []entry{leftSeed}
	right = &node{
		parent:  n.parent,
		leaf:    n.leaf,
		level:   n.level,
		entries: []entry{rightSeed},
	}

	// fix up the parent pointers of the seeds' children, if any
	if rightSeed.child != nil {
		rightSeed.child.parent = right
	}
	if leftSeed.child != nil {
		leftSeed.child.parent = left
	}

	// distribute all of n's old entries into left and right.
	for len(remaining) > 0 {
		next := pickNext(left, right, remaining)
		e := remaining[next]

		if len(remaining)+len(left.entries) <= minGroupSize {
			assign(e, left)
		} else if len(remaining)+len(right.entries) <= minGroupSize {
			assign(e, right)
		} else {
			assignGroup(e, left, right)
		}

		remaining = append(remaining[:next], remaining[next+1:]...)
	}

	return
}

func assign(e entry, group *node) {
	if e.child != nil {
		e.child.parent = group
	}
	group.entries = append(group.entries, e)
}

// assignGroup chooses one of two groups to which a node should be added.
func assignGroup(e entry, left, right *node) { leftBB := left.computeBoundingBox() rightBB := right.computeBoundingBox() leftEnlarged := boundingBox(leftBB, e.bb) rightEnlarged := boundingBox(rightBB, e.bb) // first, choose the group that needs the least enlargement leftDiff := leftEnlarged.size() - leftBB.size() rightDiff := rightEnlarged.size() - rightBB.size() if diff := leftDiff - rightDiff; diff < 0 { assign(e, left) return } else if diff > 0 { assign(e, right) return } // next, choose the group that has smaller area if diff := leftBB.size() - rightBB.size(); diff < 0 { assign(e, left) return } else if diff > 0 { assign(e, right) return } // next, choose the group with fewer entries if diff := len(left.entries) - len(right.entries); diff <= 0 { assign(e, left) return } assign(e, right) } // pickSeeds chooses two child entries of n to start a split. func (n *node) pickSeeds() (int, int) { left, right := 0, 1 maxWastedSpace := -1.0 var bb Rect for i, e1 := range n.entries { for j, e2 := range n.entries[i+1:] { initBoundingBox(&bb, e1.bb, e2.bb) d := bb.size() - e1.bb.size() - e2.bb.size() if d > maxWastedSpace { maxWastedSpace = d left, right = i, j+i+1 } } } return left, right } // pickNext chooses an entry to be added to an entry group. func
(left, right *node, entries []entry) (next int) {
	maxDiff := -1.0
	leftBB := left.computeBoundingBox()
	rightBB := right.computeBoundingBox()
	for i, e := range entries {
		d1 := boundingBox(leftBB, e.bb).size() - leftBB.size()
		d2 := boundingBox(rightBB, e.bb).size() - rightBB.size()
		d := math.Abs(d1 - d2)
		if d > maxDiff {
			maxDiff = d
			next = i
		}
	}
	return
}

// Deletion

// Delete removes an object from the tree. It returns true if the object was
// found and removed, and false otherwise.
//
// Implemented per Section 3.3 of "R-trees: A Dynamic Index Structure for
// Spatial Searching" by A. Guttman, Proceedings of ACM SIGMOD, p. 47-57, 1984.
func (tree *Rtree) Delete(obj Spatial) bool {
	n := tree.findLeaf(tree.root, obj)
	if n == nil {
		return false
	}

	ind := -1
	for i, e := range n.entries {
		if e.obj == obj {
			ind = i
		}
	}
	if ind < 0 {
		return false
	}

	n.entries = append(n.entries[:ind], n.entries[ind+1:]...)

	tree.condenseTree(n)
	tree.size--

	if !tree.root.leaf && len(tree.root.entries) == 1 {
		tree.root = tree.root.entries[0].child
	}
	return true
}

// findLeaf finds the leaf node containing obj.
func (tree *Rtree) findLeaf(n *node, obj Spatial) *node {
	if n.leaf {
		return n
	}
	// if not a leaf, search all candidate subtrees
	for _, e := range n.entries {
		if e.bb.containsRect(obj.Bounds()) {
			leaf := tree.findLeaf(e.child, obj)
			if leaf == nil {
				continue
			}
			// check if the leaf actually contains the object
			for _, leafEntry := range leaf.entries {
				if leafEntry.obj == obj {
					return leaf
				}
			}
		}
	}
	return nil
}

// condenseTree deletes underflowing nodes and propagates the changes upwards.
func (tree *Rtree) condenseTree(n *node) {
	deleted := []*node{}

	for n != tree.root {
		if len(n.entries) < tree.MinChildren {
			// remove n from parent entries
			entries := []entry{}
			for _, e := range n.parent.entries {
				if e.child != n {
					entries = append(entries, e)
				}
			}
			if len(n.parent.entries) == len(entries) {
				panic(fmt.Errorf("failed to remove entry from parent"))
			}
			n.parent.entries = entries

			// only add n to deleted if it still has children
			if len(n.entries) > 0 {
				deleted = append(deleted, n)
			}
		} else {
			// just a child entry deletion, no underflow
			n.getEntry().bb = n.computeBoundingBox()
		}
		n = n.parent
	}

	for _, n := range deleted {
		// reinsert the entry so that it will remain at the same level as before
		e := entry{n.computeBoundingBox(), n, nil}
		tree.insert(e, n.level+1)
	}
}

// Searching

// SearchIntersect returns all objects that intersect the specified rectangle.
//
// Implemented per Section 3.1 of "R-trees: A Dynamic Index Structure for
// Spatial Searching" by A. Guttman, Proceedings of ACM SIGMOD, p. 47-57, 1984.
func (tree *Rtree) SearchIntersect(bb *Rect) []Spatial {
	results := []Spatial{}
	return tree.searchIntersect(tree.root, bb, results)
}

func (tree *Rtree) searchIntersect(n *node, bb *Rect, results []Spatial) []Spatial {
	for _, e := range n.entries {
		if intersect(e.bb, bb) {
			if n.leaf {
				results = append(results, e.obj)
			} else {
				results = tree.searchIntersect(e.child, bb, results)
			}
		}
	}
	return results
}

// NearestNeighbor returns the closest object to the specified point.
// Implemented per "Nearest Neighbor Queries" by Roussopoulos et al func (tree *Rtree) NearestNeighbor(p Point) Spatial { obj, _ := tree.nearestNeighbor(p, tree.root, math.MaxFloat64, nil) return obj } // utilities for sorting slices of entries type entrySlice struct { entries []entry dists []float64 pt Point } func (s entrySlice) Len() int { return len(s.entries) } func (s entrySlice) Swap(i, j int) { s.entries[i], s.entries[j] = s.entries[j], s.entries[i] s.dists[i], s.dists[j] = s.dists[j], s.dists[i] } func (s entrySlice) Less(i, j int) bool { return s.dists[i] < s.dists[j] } func sortEntries(p Point, entries []entry) ([]entry, []float64) { sorted := make([]entry, len(entries)) dists := make([]float64, len(entries)) for i := 0; i < len(entries); i++ { sorted[i] = entries[i] dists[i] = p.minDist(entries[i].bb) } sort.Sort(entrySlice{sorted, dists, p}) return sorted, dists } func pruneEntries(p Point, entries []entry, minDists []float64) []entry { minMinMaxDist := math.MaxFloat64 for i := range entries { minMaxDist := p.minMaxDist(entries[i].bb) if minMaxDist < minMinMaxDist { minMinMaxDist = minMaxDist } } // remove all entries with minDist > minMinMaxDist pruned := []entry{} for i := range entries { if minDists[i] <= minMinMaxDist { pruned = append(pruned, entries[i]) } } return pruned } func (tree *Rtree) nearestNeighbor(p Point, n *node, d float64, nearest Spatial) (Spatial, float64) { if n.leaf { for _, e := range n.entries { dist := math.Sqrt(p.minDist(e.bb)) if dist < d { d = dist nearest = e.obj } } } else { branches, dists := sortEntries(p, n.entries) branches = pruneEntries(p, branches, dists) for _, e := range branches { subNearest, dist := tree.nearestNeighbor(p, e.child, d, nearest) if dist < d { d = dist nearest = subNearest } } } return nearest, d } func (tree *Rtree) NearestNeighbors(k int, p Point) []Spatial { dists := make([]float64, k) objs := make([]Spatial, k) for i := 0; i < k; i++ { dists[i] = math.MaxFloat64 objs[i] = nil } objs, _ = tree.nearestNeighbors(k, p, tree.root, dists, objs) return objs } // insert obj into nearest and return the first k elements in increasing order. func insertNearest(k int, dists []float64, nearest []Spatial, dist float64, obj Spatial) ([]float64, []Spatial) { i := 0 for i < k && dist >= dists[i] { i++ } if i >= k { return dists, nearest } left, right := dists[:i], dists[i:k-1] updatedDists := make([]float64, k) copy(updatedDists, left) updatedDists[i] = dist copy(updatedDists[i+1:], right) leftObjs, rightObjs := nearest[:i], nearest[i:k-1] updatedNearest := make([]Spatial, k) copy(updatedNearest, leftObjs) updatedNearest[i] = obj copy(updatedNearest[i+1:], rightObjs) return updatedDists, updatedNearest } func (tree *Rtree) nearestNeighbors(k int, p Point, n *node, dists []float64, nearest []Spatial) ([]Spatial, []float64) { if n.leaf { for _, e := range n.entries { dist := math.Sqrt(p.minDist(e.bb)) dists, nearest = insertNearest(k, dists, nearest, dist, e.obj) } } else { branches, branchDists := sortEntries(p, n.entries) branches = pruneEntries(p, branches, branchDists) for _, e := range branches { nearest, dists = tree.nearestNeighbors(k, p, e.child, dists, nearest) } } return nearest, dists }
pickNext
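pickSeeds and pickNext above implement Guttman's quadratic split heuristic: start a split with the two entries whose combined bounding box wastes the most space, then greedily distribute the rest. The sketch below re-implements only the seed-picking step on 1-D intervals to make the "wasted space" idea concrete; the interval type is a toy stand-in for the package's Rect, so this is an illustration of the heuristic, not the package's code.

package main

import "fmt"

// interval is a 1-D stand-in for Rect.
type interval struct{ lo, hi float64 }

func (a interval) size() float64 { return a.hi - a.lo }

// boundingInterval is the 1-D analogue of boundingBox: the smallest
// interval covering both inputs.
func boundingInterval(a, b interval) interval {
	lo, hi := a.lo, a.hi
	if b.lo < lo {
		lo = b.lo
	}
	if b.hi > hi {
		hi = b.hi
	}
	return interval{lo, hi}
}

// pickSeeds mirrors (*node).pickSeeds: choose the pair whose combined
// bounding interval wastes the most space.
func pickSeeds(entries []interval) (int, int) {
	left, right := 0, 1
	maxWaste := -1.0
	for i, e1 := range entries {
		for j, e2 := range entries[i+1:] {
			bb := boundingInterval(e1, e2)
			waste := bb.size() - e1.size() - e2.size()
			if waste > maxWaste {
				maxWaste = waste
				left, right = i, j+i+1
			}
		}
	}
	return left, right
}

func main() {
	entries := []interval{{0, 1}, {0.5, 2}, {10, 11}, {1.5, 3}}
	l, r := pickSeeds(entries)
	fmt.Printf("seeds: %v and %v\n", entries[l], entries[r]) // {0 1} and {10 11}
}

For this sample data the two far-apart intervals {0 1} and {10 11} are chosen as seeds, since a box enclosing both wastes the most space; split then distributes the remaining entries between the two groups via pickNext and assignGroup.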
rows_test.go
// Copyright (c) 2017-2019 Snowflake Computing Inc. All right reserved. package gosnowflake import ( "context" "database/sql/driver" "fmt" "io" "net/http" "sync" "testing" "time" ) // test variables var ( rowsInChunk = 123 ) func TestRowsWithoutChunkDownloader(t *testing.T) { sts1 := "1" sts2 := "Test1" var i int cc := make([][]*string, 0) for i = 0; i < 10; i++ { cc = append(cc, []*string{&sts1, &sts2}) } rt := []execResponseRowType{ {Name: "c1", ByteLength: 10, Length: 10, Type: "FIXED", Scale: 0, Nullable: true}, {Name: "c2", ByteLength: 100000, Length: 100000, Type: "TEXT", Scale: 0, Nullable: false}, } cm := []execResponseChunk{} rows := new(snowflakeRows) rows.sc = nil rows.RowType = rt rows.ChunkDownloader = &snowflakeChunkDownloader{ sc: nil, ctx: context.Background(), Total: int64(len(cc)), ChunkMetas: cm, TotalRowIndex: int64(-1), Qrmk: "", FuncDownload: nil, FuncDownloadHelper: nil, RowSet: rowSetType{JSON: cc}, } rows.ChunkDownloader.start() // var dest []driver.Value dest := make([]driver.Value, 2) for i = 0; i < len(cc); i++ { err := rows.Next(dest) if err != nil { t.Fatalf("failed to get value. err: %v", err) } if dest[0] != sts1 { t.Fatalf("failed to get value. expected: %v, got: %v", sts1, dest[0]) } if dest[1] != sts2 { t.Fatalf("failed to get value. expected: %v, got: %v", sts2, dest[1]) } } err := rows.Next(dest) if err != io.EOF { t.Fatalf("failed to finish getting data. err: %v", err) } logger.Infof("dest: %v", dest) } func downloadChunkTest(ctx context.Context, scd *snowflakeChunkDownloader, idx int) { d := make([][]*string, 0) for i := 0; i < rowsInChunk; i++ { v1 := fmt.Sprintf("%v", idx*1000+i) v2 := fmt.Sprintf("testchunk%v", idx*1000+i) d = append(d, []*string{&v1, &v2}) } scd.ChunksMutex.Lock() scd.Chunks[idx] = make([]chunkRowType, len(d)) populateJSONRowSet(scd.Chunks[idx], d) scd.DoneDownloadCond.Broadcast() scd.ChunksMutex.Unlock() } func TestRowsWithChunkDownloader(t *testing.T) { numChunks := 12 // changed the workers backupMaxChunkDownloadWorkers := MaxChunkDownloadWorkers MaxChunkDownloadWorkers = 2 logger.Info("START TESTS") var i int cc := make([][]*string, 0) for i = 0; i < 100; i++ { v1 := fmt.Sprintf("%v", i) v2 := fmt.Sprintf("Test%v", i) cc = append(cc, []*string{&v1, &v2}) } rt := []execResponseRowType{ {Name: "c1", ByteLength: 10, Length: 10, Type: "FIXED", Scale: 0, Nullable: true}, {Name: "c2", ByteLength: 100000, Length: 100000, Type: "TEXT", Scale: 0, Nullable: false}, } cm := make([]execResponseChunk, 0) for i = 0; i < numChunks; i++ { cm = append(cm, execResponseChunk{URL: fmt.Sprintf("dummyURL%v", i+1), RowCount: rowsInChunk}) } rows := new(snowflakeRows) rows.sc = nil rows.RowType = rt rows.ChunkDownloader = &snowflakeChunkDownloader{ sc: nil, ctx: context.Background(), Total: int64(len(cc) + numChunks*rowsInChunk), ChunkMetas: cm, TotalRowIndex: int64(-1), Qrmk: "HAHAHA", FuncDownload: downloadChunkTest, RowSet: rowSetType{JSON: cc}, } rows.ChunkDownloader.start() cnt := 0 dest := make([]driver.Value, 2) var err error for err != io.EOF { err := rows.Next(dest) if err == io.EOF { break } if err != nil { t.Fatalf("failed to get value. err: %v", err) } // fmt.Printf("data: %v\n", dest) cnt++ } if cnt != len(cc)+numChunks*rowsInChunk { t.Fatalf("failed to get all results. 
expected:%v, got:%v", len(cc)+numChunks*rowsInChunk, cnt) } logger.Infof("dest: %v", dest) MaxChunkDownloadWorkers = backupMaxChunkDownloadWorkers logger.Info("END TESTS") } func downloadChunkTestError(ctx context.Context, scd *snowflakeChunkDownloader, idx int) { // fail to download 6th and 10th chunk, and retry up to N times and success // NOTE: zero based index scd.ChunksMutex.Lock() defer scd.ChunksMutex.Unlock() if (idx == 6 || idx == 10) && scd.ChunksErrorCounter < maxChunkDownloaderErrorCounter { scd.ChunksError <- &chunkError{ Index: idx, Error: fmt.Errorf( "dummy error. idx: %v, errCnt: %v", idx+1, scd.ChunksErrorCounter)} scd.DoneDownloadCond.Broadcast() return } d := make([][]*string, 0) for i := 0; i < rowsInChunk; i++ { v1 := fmt.Sprintf("%v", idx*1000+i) v2 := fmt.Sprintf("testchunk%v", idx*1000+i) d = append(d, []*string{&v1, &v2}) } scd.Chunks[idx] = make([]chunkRowType, len(d)) populateJSONRowSet(scd.Chunks[idx], d) scd.DoneDownloadCond.Broadcast() } func TestRowsWithChunkDownloaderError(t *testing.T) { numChunks := 12 // changed the workers backupMaxChunkDownloadWorkers := MaxChunkDownloadWorkers MaxChunkDownloadWorkers = 3 logger.Info("START TESTS") var i int cc := make([][]*string, 0) for i = 0; i < 100; i++ { v1 := fmt.Sprintf("%v", i) v2 := fmt.Sprintf("Test%v", i) cc = append(cc, []*string{&v1, &v2}) } rt := []execResponseRowType{ {Name: "c1", ByteLength: 10, Length: 10, Type: "FIXED", Scale: 0, Nullable: true}, {Name: "c2", ByteLength: 100000, Length: 100000, Type: "TEXT", Scale: 0, Nullable: false}, } cm := make([]execResponseChunk, 0) for i = 0; i < numChunks; i++ { cm = append(cm, execResponseChunk{URL: fmt.Sprintf("dummyURL%v", i+1), RowCount: rowsInChunk}) } rows := new(snowflakeRows) rows.sc = nil rows.RowType = rt rows.ChunkDownloader = &snowflakeChunkDownloader{ sc: nil, ctx: context.Background(), Total: int64(len(cc) + numChunks*rowsInChunk), ChunkMetas: cm, TotalRowIndex: int64(-1), Qrmk: "HOHOHO", FuncDownload: downloadChunkTestError, RowSet: rowSetType{JSON: cc}, } rows.ChunkDownloader.start() cnt := 0 dest := make([]driver.Value, 2) var err error for err != io.EOF { err := rows.Next(dest) if err == io.EOF { break } if err != nil { t.Fatalf("failed to get value. err: %v", err) } // fmt.Printf("data: %v\n", dest) cnt++ } if cnt != len(cc)+numChunks*rowsInChunk { t.Fatalf("failed to get all results. expected:%v, got:%v", len(cc)+numChunks*rowsInChunk, cnt) } logger.Infof("dest: %v", dest) MaxChunkDownloadWorkers = backupMaxChunkDownloadWorkers logger.Info("END TESTS") } func downloadChunkTestErrorFail(ctx context.Context, scd *snowflakeChunkDownloader, idx int) { // fail to download 6th and 10th chunk, and retry up to N times and fail // NOTE: zero based index scd.ChunksMutex.Lock() defer scd.ChunksMutex.Unlock() if idx == 6 && scd.ChunksErrorCounter <= maxChunkDownloaderErrorCounter { scd.ChunksError <- &chunkError{ Index: idx, Error: fmt.Errorf( "dummy error. idx: %v, errCnt: %v", idx+1, scd.ChunksErrorCounter)} scd.DoneDownloadCond.Broadcast() return } d := make([][]*string, 0) for i := 0; i < rowsInChunk; i++ { v1 := fmt.Sprintf("%v", idx*1000+i) v2 := fmt.Sprintf("testchunk%v", idx*1000+i) d = append(d, []*string{&v1, &v2}) } scd.Chunks[idx] = make([]chunkRowType, len(d)) populateJSONRowSet(scd.Chunks[idx], d) scd.DoneDownloadCond.Broadcast() } func TestRowsWithChunkDownloaderErrorFail(t *testing.T)
func getChunkTestInvalidResponseBody(_ context.Context, _ *snowflakeChunkDownloader, _ string, _ map[string]string, _ time.Duration) (
	*http.Response, error) {
	return &http.Response{
		StatusCode: http.StatusOK,
		Body:       &fakeResponseBody{body: []byte{0x12, 0x34}},
	}, nil
}

func TestDownloadChunkInvalidResponseBody(t *testing.T) {
	numChunks := 2
	cm := make([]execResponseChunk, 0)
	for i := 0; i < numChunks; i++ {
		cm = append(cm, execResponseChunk{URL: fmt.Sprintf(
			"dummyURL%v", i+1), RowCount: rowsInChunk})
	}
	scd := &snowflakeChunkDownloader{
		sc: &snowflakeConn{
			rest: &snowflakeRestful{RequestTimeout: defaultRequestTimeout},
		},
		ctx:                context.Background(),
		ChunkMetas:         cm,
		TotalRowIndex:      int64(-1),
		Qrmk:               "HOHOHO",
		FuncDownload:       downloadChunk,
		FuncDownloadHelper: downloadChunkHelper,
		FuncGet:            getChunkTestInvalidResponseBody,
	}
	scd.ChunksMutex = &sync.Mutex{}
	scd.DoneDownloadCond = sync.NewCond(scd.ChunksMutex)
	scd.Chunks = make(map[int][]chunkRowType)
	scd.ChunksError = make(chan *chunkError, 1)
	scd.FuncDownload(scd.ctx, scd, 1)
	select {
	case errc := <-scd.ChunksError:
		if errc.Index != 1 {
			t.Fatalf("the error should have been caused by chunk idx: %v", errc.Index)
		}
	default:
		t.Fatal("should have caused an error and queued it in scd.ChunksError")
	}
}

func getChunkTestErrorStatus(_ context.Context, _ *snowflakeChunkDownloader, _ string, _ map[string]string, _ time.Duration) (
	*http.Response, error) {
	return &http.Response{
		StatusCode: http.StatusBadGateway,
		Body:       &fakeResponseBody{body: []byte{0x12, 0x34}},
	}, nil
}

func TestDownloadChunkErrorStatus(t *testing.T) {
	numChunks := 2
	cm := make([]execResponseChunk, 0)
	for i := 0; i < numChunks; i++ {
		cm = append(cm, execResponseChunk{URL: fmt.Sprintf(
			"dummyURL%v", i+1), RowCount: rowsInChunk})
	}
	scd := &snowflakeChunkDownloader{
		sc: &snowflakeConn{
			rest: &snowflakeRestful{RequestTimeout: defaultRequestTimeout},
		},
		ctx:                context.Background(),
		ChunkMetas:         cm,
		TotalRowIndex:      int64(-1),
		Qrmk:               "HOHOHO",
		FuncDownload:       downloadChunk,
		FuncDownloadHelper: downloadChunkHelper,
		FuncGet:            getChunkTestErrorStatus,
	}
	scd.ChunksMutex = &sync.Mutex{}
	scd.DoneDownloadCond = sync.NewCond(scd.ChunksMutex)
	scd.Chunks = make(map[int][]chunkRowType)
	scd.ChunksError = make(chan *chunkError, 1)
	scd.FuncDownload(scd.ctx, scd, 1)
	select {
	case errc := <-scd.ChunksError:
		if errc.Index != 1 {
			t.Fatalf("the error should have been caused by chunk idx: %v", errc.Index)
		}
		serr, ok := errc.Error.(*SnowflakeError)
		if !ok {
			t.Fatalf("should have been a snowflake error. err: %v", errc.Error)
		}
		if serr.Number != ErrFailedToGetChunk {
			t.Fatalf("message error code is not correct. msg: %v", serr.Number)
		}
	default:
		t.Fatal("should have caused an error and queued it in scd.ChunksError")
	}
}
{
	numChunks := 12
	logger.Info("START TESTS")
	var i int
	cc := make([][]*string, 0)
	for i = 0; i < 100; i++ {
		v1 := fmt.Sprintf("%v", i)
		v2 := fmt.Sprintf("Test%v", i)
		cc = append(cc, []*string{&v1, &v2})
	}
	rt := []execResponseRowType{
		{Name: "c1", ByteLength: 10, Length: 10, Type: "FIXED", Scale: 0, Nullable: true},
		{Name: "c2", ByteLength: 100000, Length: 100000, Type: "TEXT", Scale: 0, Nullable: false},
	}
	cm := make([]execResponseChunk, 0)
	for i = 0; i < numChunks; i++ {
		cm = append(cm, execResponseChunk{URL: fmt.Sprintf("dummyURL%v", i+1), RowCount: rowsInChunk})
	}
	rows := new(snowflakeRows)
	rows.sc = nil
	rows.RowType = rt
	rows.ChunkDownloader = &snowflakeChunkDownloader{
		sc:            nil,
		ctx:           context.Background(),
		Total:         int64(len(cc) + numChunks*rowsInChunk),
		ChunkMetas:    cm,
		TotalRowIndex: int64(-1),
		Qrmk:          "HOHOHO",
		FuncDownload:  downloadChunkTestErrorFail,
		RowSet:        rowSetType{JSON: cc},
	}
	rows.ChunkDownloader.start()
	cnt := 0
	dest := make([]driver.Value, 2)
	var err error
	for err != io.EOF {
		err := rows.Next(dest)
		if err == io.EOF {
			break
		}
		if err != nil {
			logger.Infof(
				"failure was expected; expected to read %v rows, got %v before the error", 715, cnt)
			break
		}
		// fmt.Printf("data: %v\n", dest)
		cnt++
	}
}
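The fake downloaders in these tests all follow the same coordination pattern: lock ChunksMutex, publish either a chunk or a chunkError, then Broadcast on DoneDownloadCond so the consumer re-checks its state. The following is a self-contained sketch of that mutex-plus-sync.Cond pattern; the map of string slices is toy data standing in for the driver's chunk types.

package main

import (
	"fmt"
	"sync"
)

func main() {
	mu := &sync.Mutex{}
	cond := sync.NewCond(mu)
	chunks := make(map[int][]string)

	// producers: "download" chunks, possibly out of order
	for idx := 0; idx < 3; idx++ {
		go func(idx int) {
			mu.Lock()
			chunks[idx] = []string{fmt.Sprintf("row-%d", idx)}
			cond.Broadcast() // wake the consumer so it re-checks
			mu.Unlock()
		}(idx)
	}

	// consumer: read chunks strictly in order, waiting as needed
	for idx := 0; idx < 3; idx++ {
		mu.Lock()
		for chunks[idx] == nil {
			cond.Wait() // releases mu while blocked, re-acquires on wake-up
		}
		fmt.Println(chunks[idx])
		mu.Unlock()
	}
}

Because cond.Wait atomically releases the mutex while blocking and re-acquires it before returning, the consumer can hold the lock around the whole check-and-wait loop without losing wake-ups, which is exactly why the fake downloaders above Broadcast while still holding ChunksMutex.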
data.js
import * as api from './api.js';

const host = 'http://localhost:3030';
api.settings.host = host;

export const login = api.login;
export const register = api.register;
export const logout = api.logout;

export async function getMemes() {
    return await api.get(host + '/data/memes?sortBy=_createdOn%20desc');
}

export async function createMeme(body) {
    return await api.post(host + '/data/memes', body);
}

export async function getMemeDetails(id) {
    return await api.get(host + `/data/memes/${id}`);
}

export async function
(id, meme) { return await api.put(host + `/data/memes/${id}`, meme); } export async function deleteMeme(id) { return await api.del(host + `/data/memes/${id}`); } export async function findMemesByOwner() { const userId = sessionStorage.getItem('userId'); return await api.get(host + `/data/memes?where=_ownerId%3D%22${userId}%22&sortBy=_createdOn%20desc`); }
updateMeme
errors.py
class DimError(Exception):
error_types = dict(
    InvalidPoolError=2,
    InvalidIPError=3,
    InvalidVLANError=4,
    InvalidStatusError=5,
    InvalidPriorityError=6,
    InvalidGroupError=7,
    InvalidUserError=8,
    InvalidAccessRightError=9,
    InvalidZoneError=10,
    InvalidViewError=11,
    MultipleViewsError=12,
    InvalidParameterError=19,
    AlreadyExistsError=20,
    NotInPoolError=21,
    NotInDelegationError=22,
    PermissionDeniedError=23,
    HasChildrenError=24,
)

# .items() works on both Python 2 and 3 (iteritems was Python 2 only)
for name, code in error_types.items():
    globals()[name] = type(name, (DimError,), {'code': code})
code = 1